/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static int nvme_major;
module_param(nvme_major, int, 0);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

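/*
 * Controller state machine: a transition is only allowed when the current
 * state appears in the inner switch for the requested new state, i.e.
 * NEW/RESETTING -> LIVE, NEW/LIVE -> RESETTING, LIVE/RESETTING -> DELETING
 * and DELETING -> DEAD.  Any other combination leaves ctrl->state untouched
 * and returns false.
 */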
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state = ctrl->state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}
	spin_unlock_irq(&ctrl->lock);

	if (changed)
		ctrl->state = new_state;

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);

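/*
 * Allocate a block layer request for an NVMe command.  With qid set to
 * NVME_QID_ANY the request may be placed on any hardware queue; a specific
 * qid maps onto hardware context qid - 1 (qid 0 falls back to hctx 0).
 */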
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

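/*
 * Translate a block layer discard into an NVMe Dataset Management command.
 * A single nvme_dsm_range (slba/nlb in logical blocks) is attached as the
 * request payload; dsm.nr is a 0's based range count, so 0 means one range.
 */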
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return 0;
}

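/*
 * Build a read/write command from a struct request.  Note that rw.length is
 * a 0's based count of logical blocks, hence the "- 1", and that the
 * protection information checks (GUARD/REF) are only requested when the
 * namespace is formatted with end-to-end data protection (ns->ms/pi_type).
 */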
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = 0;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req_op(req) == REQ_OP_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req_op(req) == REQ_OP_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	ret = req->errors;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

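/*
 * Like __nvme_submit_sync_cmd(), but maps a user space data buffer (and,
 * for formatted namespaces, a separate metadata buffer) into the request
 * before executing it.  This backs the ioctl passthrough paths below.
 */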
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
out_free_meta:
	kfree(meta);
out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

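/*
 * Identify with CNS = 2 returns a 4KB page of up to 1024 active namespace
 * IDs starting after the nsid passed in, which is how the scan code below
 * walks large namespace ranges; CNS = 0 (the default) identifies a single
 * namespace and CNS = 1 the controller itself.
 */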
static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify,
	c.identify.nsid = cpu_to_le32(nsid),

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page,
	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART),

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

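/*
 * Set Features / Number of Queues: the requested count is written 0's based
 * into both 16-bit halves of the feature dword (submission and completion
 * queues), and the completion result reports the allocated counts in the
 * same layout, so the smaller half bounds how many I/O queues may be used.
 */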
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

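/*
 * Register the namespace with the block layer integrity framework so that
 * T10 PI guard/reference tags can be generated and verified for it: Type
 * 1/2 use the CRC profile with reference tag checking, Type 3 checks the
 * guard tag only.  Without CONFIG_BLK_DEV_INTEGRITY this is a no-op.
 */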
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}

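/*
 * Persistent reservation support: the generic block layer pr_type values
 * map onto the NVMe reservation type codes (1 = Write Exclusive ... 6 =
 * Exclusive Access - All Registrants), and each pr_ops callback is a thin
 * wrapper that packs the reservation keys into a 16-byte payload for the
 * corresponding reservation command.
 */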
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
};

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

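/*
 * Poll CSTS.RDY until it matches the requested enable state.  CAP.TO is in
 * units of 500 ms, hence the (TO + 1) * HZ / 2 deadline below; a fatal
 * signal or a controller that never becomes ready aborts the wait.
 */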
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

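/*
 * Apply controller-wide limits to a request queue: maximum transfer size
 * and segment count derived from the controller's max_hw_sectors, an
 * optional stripe/chunk size quirk, the page-sized virtual boundary
 * required by PRP lists, and write cache flags when the controller reports
 * a volatile write cache (VWC).
 */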
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);

	kfree(id);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_attrs_are_visible,
};

#define nvme_show_str_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%d\n", ctrl->field);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	NULL
};

static struct attribute_group nvme_dev_attrs_group = {
	.attrs = nvme_dev_attrs,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

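/*
 * ctrl->namespaces is kept sorted by namespace ID (see the list_sort() call
 * in the scan work), which is what lets nvme_find_ns() stop early once it
 * has walked past the requested nsid.
 */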
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	lockdep_assert_held(&ctrl->namespaces_mutex);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid)
			return ns;
		if (ns->ns_id > nsid)
			break;
	}
	return NULL;
}

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	lockdep_assert_held(&ctrl->namespaces_mutex);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = ctrl->device;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	list_add_tail_rcu(&ns->list, &ctrl->namespaces);
	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	add_disk(ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
out_free_disk:
	kfree(disk);
out_free_queue:
	blk_cleanup_queue(ns->queue);
out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	lockdep_assert_held(&ns->ctrl->namespaces_mutex);

	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}
	list_del_init(&ns->list);
	synchronize_rcu();
	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}

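/*
 * Namespace scanning: controllers that implement the Identify CNS = 2
 * namespace list (NVMe 1.1 and later, unless quirked) are walked 1024
 * NSIDs at a time; anything older is probed sequentially from 1 to id->nn.
 * Either way, namespaces that no longer exist are removed afterwards.
 */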
540c801c KB |
1565 | static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) |
1566 | { | |
1567 | struct nvme_ns *ns; | |
1568 | __le32 *ns_list; | |
1569 | unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); | |
1570 | int ret = 0; | |
1571 | ||
1572 | ns_list = kzalloc(0x1000, GFP_KERNEL); | |
1573 | if (!ns_list) | |
1574 | return -ENOMEM; | |
1575 | ||
1576 | for (i = 0; i < num_lists; i++) { | |
1577 | ret = nvme_identify_ns_list(ctrl, prev, ns_list); | |
1578 | if (ret) | |
47b0e50a | 1579 | goto free; |
540c801c KB |
1580 | |
1581 | for (j = 0; j < min(nn, 1024U); j++) { | |
1582 | nsid = le32_to_cpu(ns_list[j]); | |
1583 | if (!nsid) | |
1584 | goto out; | |
1585 | ||
1586 | nvme_validate_ns(ctrl, nsid); | |
1587 | ||
1588 | while (++prev < nsid) { | |
1589 | ns = nvme_find_ns(ctrl, prev); | |
1590 | if (ns) | |
1591 | nvme_ns_remove(ns); | |
1592 | } | |
1593 | } | |
1594 | nn -= j; | |
1595 | } | |
1596 | out: | |
47b0e50a SB |
1597 | nvme_remove_invalid_namespaces(ctrl, prev); |
1598 | free: | |
540c801c KB |
1599 | kfree(ns_list); |
1600 | return ret; | |
1601 | } | |
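/*
 * Editor's note (illustrative, not part of the original source): the list
 * scan above works in chunks because each Identify Namespace List response
 * is a single 4KB page, i.e. at most 1024 __le32 namespace IDs.  As a
 * worked example, a controller reporting nn = 2500 namespaces is covered by
 * DIV_ROUND_UP(2500, 1024) = 3 list commands; every command resumes from
 * the highest NSID seen so far ("prev"), and a zero entry in the returned
 * list ends the scan early.  Namespaces the controller no longer reports
 * are dropped via nvme_ns_remove() and nvme_remove_invalid_namespaces().
 */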
1602 | ||
5955be21 | 1603 | static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) |
5bae7f73 | 1604 | { |
5bae7f73 CH |
1605 | unsigned i; |
1606 | ||
69d3b8ac CH |
1607 | lockdep_assert_held(&ctrl->namespaces_mutex); |
1608 | ||
540c801c KB |
1609 | for (i = 1; i <= nn; i++) |
1610 | nvme_validate_ns(ctrl, i); | |
1611 | ||
47b0e50a | 1612 | nvme_remove_invalid_namespaces(ctrl, nn); |
5bae7f73 CH |
1613 | } |
1614 | ||
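/*
 * Editor's note (summary added for clarity, not in the original source):
 * nvme_scan_work() below is the namespace scan handler scheduled through
 * nvme_queue_scan().  It returns immediately unless the controller is live,
 * reads Identify Controller to learn the namespace count (nn), and prefers
 * the namespace list scan on controllers reporting NVMe 1.1 or later
 * without the NVME_QUIRK_IDENTIFY_CNS quirk; otherwise it falls back to
 * probing every NSID from 1 to nn sequentially.  The namespace list is
 * re-sorted before the mutex is dropped.
 */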
5955be21 | 1615 | static void nvme_scan_work(struct work_struct *work) |
5bae7f73 | 1616 | { |
5955be21 CH |
1617 | struct nvme_ctrl *ctrl = |
1618 | container_of(work, struct nvme_ctrl, scan_work); | |
5bae7f73 | 1619 | struct nvme_id_ctrl *id; |
540c801c | 1620 | unsigned nn; |
5bae7f73 | 1621 | |
5955be21 CH |
1622 | if (ctrl->state != NVME_CTRL_LIVE) |
1623 | return; | |
1624 | ||
5bae7f73 CH |
1625 | if (nvme_identify_ctrl(ctrl, &id)) |
1626 | return; | |
540c801c | 1627 | |
69d3b8ac | 1628 | mutex_lock(&ctrl->namespaces_mutex); |
540c801c KB |
1629 | nn = le32_to_cpu(id->nn); |
1630 | if (ctrl->vs >= NVME_VS(1, 1) && | |
1631 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { | |
1632 | if (!nvme_scan_ns_list(ctrl, nn)) | |
1633 | goto done; | |
1634 | } | |
5955be21 | 1635 | nvme_scan_ns_sequential(ctrl, nn); |
540c801c KB |
1636 | done: |
1637 | list_sort(NULL, &ctrl->namespaces, ns_cmp); | |
69d3b8ac | 1638 | mutex_unlock(&ctrl->namespaces_mutex); |
5bae7f73 | 1639 | kfree(id); |
5955be21 CH |
1640 | |
1641 | if (ctrl->ops->post_scan) | |
1642 | ctrl->ops->post_scan(ctrl); | |
5bae7f73 | 1643 | } |
5955be21 CH |
1644 | |
1645 | void nvme_queue_scan(struct nvme_ctrl *ctrl) | |
1646 | { | |
1647 | /* | |
1648 | * Do not queue new scan work when a controller is being reset during
1649 | * removal. | |
1650 | */ | |
1651 | if (ctrl->state == NVME_CTRL_LIVE) | |
1652 | schedule_work(&ctrl->scan_work); | |
1653 | } | |
1654 | EXPORT_SYMBOL_GPL(nvme_queue_scan); | |
5bae7f73 CH |
1655 | |
1656 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) | |
1657 | { | |
1658 | struct nvme_ns *ns, *next; | |
1659 | ||
0ff9d4e1 KB |
1660 | /* |
1661 | * The dead state indicates the controller was not gracefully
1662 | * disconnected. In that case, we won't be able to flush any data while | |
1663 | * removing the namespaces' disks; fail all the queues now to avoid | |
1664 | * potentially having to clean up the failed sync later. | |
1665 | */ | |
1666 | if (ctrl->state == NVME_CTRL_DEAD) | |
1667 | nvme_kill_queues(ctrl); | |
1668 | ||
b7b9c227 | 1669 | mutex_lock(&ctrl->namespaces_mutex); |
5bae7f73 CH |
1670 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) |
1671 | nvme_ns_remove(ns); | |
b7b9c227 | 1672 | mutex_unlock(&ctrl->namespaces_mutex); |
5bae7f73 | 1673 | } |
576d55d6 | 1674 | EXPORT_SYMBOL_GPL(nvme_remove_namespaces); |
5bae7f73 | 1675 | |
f866fc42 CH |
1676 | static void nvme_async_event_work(struct work_struct *work) |
1677 | { | |
1678 | struct nvme_ctrl *ctrl = | |
1679 | container_of(work, struct nvme_ctrl, async_event_work); | |
1680 | ||
1681 | spin_lock_irq(&ctrl->lock); | |
1682 | while (ctrl->event_limit > 0) { | |
1683 | int aer_idx = --ctrl->event_limit; | |
1684 | ||
1685 | spin_unlock_irq(&ctrl->lock); | |
1686 | ctrl->ops->submit_async_event(ctrl, aer_idx); | |
1687 | spin_lock_irq(&ctrl->lock); | |
1688 | } | |
1689 | spin_unlock_irq(&ctrl->lock); | |
1690 | } | |
1691 | ||
1692 | void nvme_complete_async_event(struct nvme_ctrl *ctrl, | |
1693 | struct nvme_completion *cqe) | |
1694 | { | |
1695 | u16 status = le16_to_cpu(cqe->status) >> 1; | |
1696 | u32 result = le32_to_cpu(cqe->result); | |
1697 | ||
1698 | if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) { | |
1699 | ++ctrl->event_limit; | |
1700 | schedule_work(&ctrl->async_event_work); | |
1701 | } | |
1702 | ||
1703 | if (status != NVME_SC_SUCCESS) | |
1704 | return; | |
1705 | ||
1706 | switch (result & 0xff07) { | |
1707 | case NVME_AER_NOTICE_NS_CHANGED: | |
1708 | dev_info(ctrl->device, "rescanning\n"); | |
1709 | nvme_queue_scan(ctrl); | |
1710 | break; | |
1711 | default: | |
1712 | dev_warn(ctrl->device, "async event result %08x\n", result); | |
1713 | } | |
1714 | } | |
1715 | EXPORT_SYMBOL_GPL(nvme_complete_async_event); | |
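/*
 * Editor's sketch (hypothetical, not part of this file): a transport's
 * admin completion path is expected to route AER completions here rather
 * than completing a block layer request.  A minimal caller could look like
 * the following, where foo_queue and foo_is_aer_completion() are invented
 * names used only for illustration:
 *
 *	static void foo_handle_cqe(struct foo_queue *q, struct nvme_completion *cqe)
 *	{
 *		if (foo_is_aer_completion(q, cqe)) {
 *			nvme_complete_async_event(q->ctrl, cqe);
 *			return;
 *		}
 *		... otherwise complete the matching block layer request ...
 *	}
 */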
1716 | ||
1717 | void nvme_queue_async_events(struct nvme_ctrl *ctrl) | |
1718 | { | |
1719 | ctrl->event_limit = NVME_NR_AERS; | |
1720 | schedule_work(&ctrl->async_event_work); | |
1721 | } | |
1722 | EXPORT_SYMBOL_GPL(nvme_queue_async_events); | |
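/*
 * Editor's note (added for clarity, not in the original source):
 * nvme_queue_async_events() re-arms the full budget of NVME_NR_AERS
 * Asynchronous Event Requests; nvme_async_event_work() then submits them
 * one by one through ops->submit_async_event().  Transports typically call
 * this once the controller is (re)initialized and its admin queue is able
 * to accept commands again.
 */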
1723 | ||
f3ca80fc CH |
1724 | static DEFINE_IDA(nvme_instance_ida); |
1725 | ||
1726 | static int nvme_set_instance(struct nvme_ctrl *ctrl) | |
1727 | { | |
1728 | int instance, error; | |
1729 | ||
1730 | do { | |
1731 | if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) | |
1732 | return -ENODEV; | |
1733 | ||
1734 | spin_lock(&dev_list_lock); | |
1735 | error = ida_get_new(&nvme_instance_ida, &instance); | |
1736 | spin_unlock(&dev_list_lock); | |
1737 | } while (error == -EAGAIN); | |
1738 | ||
1739 | if (error) | |
1740 | return -ENODEV; | |
1741 | ||
1742 | ctrl->instance = instance; | |
1743 | return 0; | |
1744 | } | |
1745 | ||
1746 | static void nvme_release_instance(struct nvme_ctrl *ctrl) | |
1747 | { | |
1748 | spin_lock(&dev_list_lock); | |
1749 | ida_remove(&nvme_instance_ida, ctrl->instance); | |
1750 | spin_unlock(&dev_list_lock); | |
1751 | } | |
1752 | ||
53029b04 | 1753 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) |
576d55d6 | 1754 | { |
f866fc42 | 1755 | flush_work(&ctrl->async_event_work); |
5955be21 CH |
1756 | flush_work(&ctrl->scan_work); |
1757 | nvme_remove_namespaces(ctrl); | |
1758 | ||
53029b04 | 1759 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); |
f3ca80fc CH |
1760 | |
1761 | spin_lock(&dev_list_lock); | |
1762 | list_del(&ctrl->node); | |
1763 | spin_unlock(&dev_list_lock); | |
53029b04 | 1764 | } |
576d55d6 | 1765 | EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); |
53029b04 KB |
1766 | |
1767 | static void nvme_free_ctrl(struct kref *kref) | |
1768 | { | |
1769 | struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref); | |
f3ca80fc CH |
1770 | |
1771 | put_device(ctrl->device); | |
1772 | nvme_release_instance(ctrl); | |
075790eb | 1773 | ida_destroy(&ctrl->ns_ida); |
f3ca80fc CH |
1774 | |
1775 | ctrl->ops->free_ctrl(ctrl); | |
1776 | } | |
1777 | ||
1778 | void nvme_put_ctrl(struct nvme_ctrl *ctrl) | |
1779 | { | |
1780 | kref_put(&ctrl->kref, nvme_free_ctrl); | |
1781 | } | |
576d55d6 | 1782 | EXPORT_SYMBOL_GPL(nvme_put_ctrl); |
f3ca80fc CH |
1783 | |
1784 | /* | |
1785 | * Initialize an NVMe controller structure. This needs to be called during
1786 | * the earliest initialization so that we have the initialized structure around
1787 | * during probing. | |
1788 | */ | |
1789 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, | |
1790 | const struct nvme_ctrl_ops *ops, unsigned long quirks) | |
1791 | { | |
1792 | int ret; | |
1793 | ||
bb8d261e CH |
1794 | ctrl->state = NVME_CTRL_NEW; |
1795 | spin_lock_init(&ctrl->lock); | |
f3ca80fc | 1796 | INIT_LIST_HEAD(&ctrl->namespaces); |
69d3b8ac | 1797 | mutex_init(&ctrl->namespaces_mutex); |
f3ca80fc CH |
1798 | kref_init(&ctrl->kref); |
1799 | ctrl->dev = dev; | |
1800 | ctrl->ops = ops; | |
1801 | ctrl->quirks = quirks; | |
5955be21 | 1802 | INIT_WORK(&ctrl->scan_work, nvme_scan_work); |
f866fc42 | 1803 | INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); |
f3ca80fc CH |
1804 | |
1805 | ret = nvme_set_instance(ctrl); | |
1806 | if (ret) | |
1807 | goto out; | |
1808 | ||
779ff756 | 1809 | ctrl->device = device_create_with_groups(nvme_class, ctrl->dev, |
f3ca80fc | 1810 | MKDEV(nvme_char_major, ctrl->instance), |
f4f0f63e | 1811 | ctrl, nvme_dev_attr_groups, |
779ff756 | 1812 | "nvme%d", ctrl->instance); |
f3ca80fc CH |
1813 | if (IS_ERR(ctrl->device)) { |
1814 | ret = PTR_ERR(ctrl->device); | |
1815 | goto out_release_instance; | |
1816 | } | |
1817 | get_device(ctrl->device); | |
075790eb | 1818 | ida_init(&ctrl->ns_ida); |
f3ca80fc | 1819 | |
f3ca80fc CH |
1820 | spin_lock(&dev_list_lock); |
1821 | list_add_tail(&ctrl->node, &nvme_ctrl_list); | |
1822 | spin_unlock(&dev_list_lock); | |
1823 | ||
1824 | return 0; | |
f3ca80fc CH |
1825 | out_release_instance: |
1826 | nvme_release_instance(ctrl); | |
1827 | out: | |
1828 | return ret; | |
1829 | } | |
576d55d6 | 1830 | EXPORT_SYMBOL_GPL(nvme_init_ctrl); |
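/*
 * Editor's sketch (hypothetical, not part of this file): a transport driver
 * is expected to call nvme_init_ctrl() at the very start of its probe path,
 * before touching controller registers, so that ctrl->device, the instance
 * number and the work items exist for the rest of bring-up.  foo_ctrl,
 * foo_ctrl_ops and foo_probe are invented names used only for illustration:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_ctrl *foo;
 *		int ret;
 *
 *		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		ret = nvme_init_ctrl(&foo->ctrl, dev, &foo_ctrl_ops, 0);
 *		if (ret) {
 *			kfree(foo);
 *			return ret;
 *		}
 *		... bring the controller up, then nvme_queue_scan(&foo->ctrl) ...
 *		return 0;
 *	}
 */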
f3ca80fc | 1831 | |
69d9a99c KB |
1832 | /** |
1833 | * nvme_kill_queues(): Ends all namespace queues | |
1834 | * @ctrl: the dead controller whose namespace queues need to be ended
1835 | * | |
1836 | * Call this function when the driver determines it is unable to bring the
1837 | * controller into a state capable of servicing I/O.
1838 | */ | |
1839 | void nvme_kill_queues(struct nvme_ctrl *ctrl) | |
1840 | { | |
1841 | struct nvme_ns *ns; | |
1842 | ||
0bf77e9d ML |
1843 | rcu_read_lock(); |
1844 | list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { | |
69d9a99c KB |
1845 | if (!kref_get_unless_zero(&ns->kref)) |
1846 | continue; | |
1847 | ||
1848 | /* | |
1849 | * Revalidating a dead namespace sets capacity to 0. This stops
1850 | * buffered writers from dirtying pages that can't be synced.
1851 | */ | |
1852 | if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags)) | |
1853 | revalidate_disk(ns->disk); | |
1854 | ||
1855 | blk_set_queue_dying(ns->queue); | |
1856 | blk_mq_abort_requeue_list(ns->queue); | |
1857 | blk_mq_start_stopped_hw_queues(ns->queue, true); | |
1858 | ||
1859 | nvme_put_ns(ns); | |
1860 | } | |
0bf77e9d | 1861 | rcu_read_unlock(); |
69d9a99c | 1862 | } |
237045fc | 1863 | EXPORT_SYMBOL_GPL(nvme_kill_queues); |
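/*
 * Editor's note (added for clarity, not in the original source): within
 * this file nvme_kill_queues() is reached from nvme_remove_namespaces()
 * when the controller is in the NVME_CTRL_DEAD state, i.e. it went away
 * without a graceful shutdown.  A transport that detects a dead controller
 * (after a failed reset or a surprise removal, for instance) may also call
 * it directly so that outstanding and future I/O fails immediately instead
 * of blocking namespace teardown.
 */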
69d9a99c | 1864 | |
25646264 | 1865 | void nvme_stop_queues(struct nvme_ctrl *ctrl) |
363c9aac SG |
1866 | { |
1867 | struct nvme_ns *ns; | |
1868 | ||
0bf77e9d ML |
1869 | rcu_read_lock(); |
1870 | list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { | |
363c9aac SG |
1871 | spin_lock_irq(ns->queue->queue_lock); |
1872 | queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); | |
1873 | spin_unlock_irq(ns->queue->queue_lock); | |
1874 | ||
1875 | blk_mq_cancel_requeue_work(ns->queue); | |
1876 | blk_mq_stop_hw_queues(ns->queue); | |
1877 | } | |
0bf77e9d | 1878 | rcu_read_unlock(); |
363c9aac | 1879 | } |
576d55d6 | 1880 | EXPORT_SYMBOL_GPL(nvme_stop_queues); |
363c9aac | 1881 | |
25646264 | 1882 | void nvme_start_queues(struct nvme_ctrl *ctrl) |
363c9aac SG |
1883 | { |
1884 | struct nvme_ns *ns; | |
1885 | ||
0bf77e9d ML |
1886 | rcu_read_lock(); |
1887 | list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { | |
363c9aac | 1888 | queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); |
363c9aac SG |
1889 | blk_mq_start_stopped_hw_queues(ns->queue, true); |
1890 | blk_mq_kick_requeue_list(ns->queue); | |
1891 | } | |
0bf77e9d | 1892 | rcu_read_unlock(); |
363c9aac | 1893 | } |
576d55d6 | 1894 | EXPORT_SYMBOL_GPL(nvme_start_queues); |
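/*
 * Editor's sketch (hypothetical, not part of this file): nvme_stop_queues()
 * and nvme_start_queues() are intended to bracket a controller reset or
 * suspend: quiesce block layer dispatch before the transport tears down its
 * I/O queues, and restart the hardware queues and requeue lists once they
 * are back.  foo_reset_ctrl() is an invented name used only for
 * illustration:
 *
 *	static void foo_reset_ctrl(struct nvme_ctrl *ctrl)
 *	{
 *		nvme_stop_queues(ctrl);
 *		... tear down and re-create the transport's I/O queues ...
 *		nvme_start_queues(ctrl);
 *	}
 */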
363c9aac | 1895 | |
5bae7f73 CH |
1896 | int __init nvme_core_init(void) |
1897 | { | |
1898 | int result; | |
1899 | ||
1900 | result = register_blkdev(nvme_major, "nvme"); | |
1901 | if (result < 0) | |
1902 | return result; | |
1903 | else if (result > 0) | |
1904 | nvme_major = result; | |
1905 | ||
f3ca80fc CH |
1906 | result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme", |
1907 | &nvme_dev_fops); | |
1908 | if (result < 0) | |
1909 | goto unregister_blkdev; | |
1910 | else if (result > 0) | |
1911 | nvme_char_major = result; | |
1912 | ||
1913 | nvme_class = class_create(THIS_MODULE, "nvme"); | |
1914 | if (IS_ERR(nvme_class)) { | |
1915 | result = PTR_ERR(nvme_class); | |
1916 | goto unregister_chrdev; | |
1917 | } | |
1918 | ||
5bae7f73 | 1919 | return 0; |
f3ca80fc CH |
1920 | |
1921 | unregister_chrdev: | |
1922 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); | |
1923 | unregister_blkdev: | |
1924 | unregister_blkdev(nvme_major, "nvme"); | |
1925 | return result; | |
5bae7f73 CH |
1926 | } |
1927 | ||
1928 | void nvme_core_exit(void) | |
1929 | { | |
f3ca80fc CH |
1930 | class_destroy(nvme_class); |
1931 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); | |
23bd63ce | 1932 | unregister_blkdev(nvme_major, "nvme"); |
5bae7f73 | 1933 | } |
576d55d6 ML |
1934 | |
1935 | MODULE_LICENSE("GPL"); | |
1936 | MODULE_VERSION("1.0"); | |
1937 | module_init(nvme_core_init); | |
1938 | module_exit(nvme_core_exit); |