// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret;

	ret = kstrtouint(val, 10, &n);
	if (ret != 0 || n > num_possible_cpus())
		return -EINVAL;
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops io_queue_count_ops = {
	.set = io_queue_count_set,
	.get = param_get_uint,
};

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	int q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;
	return param_set_int(val, kp);
}

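/*
 * The doorbell (and shadow doorbell) arrays interleave submission and
 * completion entries: for queue qid, the SQ slot sits at 2 * qid * stride
 * and the matching CQ slot directly follows it at (2 * qid + 1) * stride.
 */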
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t meta_dma;
	struct scatterlist *sg;
};

static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
	return dev->nr_allocated_queues * 8 * dev->db_stride;
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

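/*
 * Returns true when new_idx has moved past event_idx since the previous
 * doorbell value 'old', using modular u16 arithmetic so the comparison
 * stays correct across tail wrap-around (the same trick as virtio's
 * vring_need_event()).
 */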
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory. The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}

static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
{
	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	if (write_sq)
		nvme_write_sq_db(nvmeq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	nvme_write_sq_db(nvmeq);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

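/*
 * Decide between SGLs and PRPs for a request: the controller must
 * advertise SGL support (bits 0/1 of the identify 'sgls' field), the
 * admin queue (qid 0) always uses PRPs, and SGLs only win when the
 * average segment size reaches sgl_threshold.
 */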
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
	int i;

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
				    rq_dma_dir(req));
	else
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	mempool_free(iod->sg, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

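/*
 * Build the PRP entries for a request.  Only the first PRP may carry an
 * offset into a controller page; every following entry must be page
 * aligned.  Lists longer than one page are chained by making the last
 * slot of each PRP-list page point at the next page.
 */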
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;

 bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
	unsigned int first_prp_len = dev->ctrl.page_size - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (iod->nvmeq->qid &&
			    dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
	if (!iod->nents)
		goto out;

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
				iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out;

	iod->use_sgl = nvme_pci_use_sgls(dev, req);
	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
out:
	if (ret != BLK_STS_OK)
		nvme_unmap_data(dev, req);
	return ret;
}

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command cmnd;
	blk_status_t ret;

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &cmnd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &cmnd);
		if (ret)
			goto out_unmap_data;
	}

	blk_mq_start_request(req);
	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_dev *dev = iod->nvmeq->dev;

	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
	nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	struct request *req;

	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	nvme_end_request(req, cqe->status, cqe->result);
}

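/*
 * Advance the CQ head; on wrap-around the expected phase bit is toggled,
 * which is what lets nvme_cqe_pending() tell new entries from stale ones.
 */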
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u16 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}

static inline int nvme_process_cq(struct nvme_queue *nvmeq)
{
	int found = 0;

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		nvme_handle_cqe(nvmeq, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	irqreturn_t ret = IRQ_NONE;

	/*
	 * The rmb/wmb pair ensures we see all updates from a previous run of
	 * the irq handler, even if that was on another CPU.
	 */
	rmb();
	if (nvme_process_cq(nvmeq))
		ret = IRQ_HANDLED;
	wmb();

	return ret;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/*
 * Poll for completions for any interrupt driven queue
 * Can be called from any context.
 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_process_cq(nvmeq);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->cq_poll_lock);

	return found;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	nvme_submit_cmd(nvmeq, &c, true);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq)
{
	struct nvme_ctrl *ctrl = &dev->ctrl;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_DONE;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
		nvme_poll(req->mq_hctx);
	else
		nvme_poll_irqdisable(nvmeq);

	if (blk_mq_request_completed(req)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_DONE;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_DONE.
	 */
	switch (dev->ctrl.state) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		/* fall through */
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, true);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_DONE;
	case NVME_CTRL_RESETTING:
		return BLK_EH_RESET_TIMER;
	default:
		break;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);

		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_DONE;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}

/*
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq: queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return 1;

	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
	mb();

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
	return 0;
}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = &dev->queues[0];

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl);

	nvme_poll_irqdisable(nvmeq);
}

/*
 * Called only on a device that has been disabled and after all other threads
 * that can check this device's completion queues have synced. This is the
 * last chance for the driver to see a natural completion before
 * nvme_cancel_request() terminates all incomplete requests.
 */
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_process_cq(&dev->queues[i]);
}

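/*
 * When SQes live in the controller memory buffer, the queue depth may
 * have to shrink so that all I/O queues still fit in the CMB; below a
 * depth of 64 the caller falls back to queues in host memory instead.
 */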
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);

		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}

static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
	nvmeq->q_depth = depth;
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
		goto free_cqdma;

	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
 free_nvmeq:
	return -ENOMEM;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	wmb(); /* ensure the first interrupt sees the initialization */
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
	u16 vector = 0;

	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
		set_bit(NVMEQ_POLLED, &nvmeq->flags);

	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
	if (result)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		return result;
	if (result)
		goto release_cq;

	nvmeq->cq_vector = vector;
	nvme_init_queue(nvmeq, qid);

	if (!polled) {
		result = queue_request_irq(nvmeq);
		if (result < 0)
			goto release_sq;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;

release_sq:
	dev->online_queues--;
	adapter_delete_sq(dev, qid);
release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static const struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.init_hctx	= nvme_admin_init_hctx,
	.init_request	= nvme_init_request,
	.timeout	= nvme_timeout,
};

static const struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.commit_rqs	= nvme_commit_rqs,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};

static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);
		blk_cleanup_queue(dev->ctrl.admin_q);
		blk_mq_free_tag_set(&dev->admin_tagset);
	}
}

static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
	if (!dev->ctrl.admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;

		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
		dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;
		dev->ctrl.admin_tagset = &dev->admin_tagset;

		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
			blk_mq_free_tag_set(&dev->admin_tagset);
			return -ENOMEM;
		}
		if (!blk_get_queue(dev->ctrl.admin_q)) {
			nvme_dev_remove_admin(dev);
			dev->ctrl.admin_q = NULL;
			return -ENODEV;
		}
	} else
		blk_mq_unquiesce_queue(dev->ctrl.admin_q);

	return 0;
}

static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

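/*
 * Grow the BAR 0 mapping on demand so the doorbell registers for newly
 * created queues are covered; db_bar_size() above gives the size needed
 * for a given number of I/O queues.
 */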
static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}

static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl);
	if (result < 0)
		return result;

	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
	if (result)
		return result;

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	nvme_init_queue(nvmeq, 0);
	result = queue_request_irq(nvmeq);
	if (result) {
		dev->online_queues--;
		return result;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;
}

*dev
)
1690 unsigned i
, max
, rw_queues
;
1693 for (i
= dev
->ctrl
.queue_count
; i
<= dev
->max_qid
; i
++) {
1694 if (nvme_alloc_queue(dev
, i
, dev
->q_depth
)) {
1700 max
= min(dev
->max_qid
, dev
->ctrl
.queue_count
- 1);
1701 if (max
!= 1 && dev
->io_queues
[HCTX_TYPE_POLL
]) {
1702 rw_queues
= dev
->io_queues
[HCTX_TYPE_DEFAULT
] +
1703 dev
->io_queues
[HCTX_TYPE_READ
];
1708 for (i
= dev
->online_queues
; i
<= max
; i
++) {
1709 bool polled
= i
> rw_queues
;
1711 ret
= nvme_create_queue(&dev
->queues
[i
], i
, polled
);
1717 * Ignore failing Create SQ/CQ commands, we can continue with less
1718 * than the desired amount of queues, and even a controller without
1719 * I/O queues can still be used to issue admin commands. This might
1720 * be useful to upgrade a buggy firmware for example.
1722 return ret
>= 0 ? 0 : ret
;
static ssize_t nvme_cmb_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
		       ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);

static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}

static void nvme_map_cmb(struct nvme_dev *dev)
{
	u64 size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int bar;

	if (dev->cmb_size)
		return;

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	if (offset > bar_size)
		return;

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);

	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
				    &dev_attr_cmb.attr, NULL))
		dev_warn(dev->ctrl.device,
			 "failed to add sysfs attribute for CMB\n");
}

static inline void nvme_release_cmb(struct nvme_dev *dev)
{
	if (dev->cmb_size) {
		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
					     &dev_attr_cmb.attr, NULL);
		dev->cmb_size = 0;
	}
}

static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode	= nvme_admin_set_features;
	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
	c.features.dword11	= cpu_to_le32(bits);
	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
					      ilog2(dev->ctrl.page_size));
	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	}
	return ret;
}

static void nvme_free_host_mem(struct nvme_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;

		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
			       le64_to_cpu(desc->addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev,
			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->nr_host_mem_descs = 0;
}

static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 max_entries, len;
	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size, tmp;

	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
				   &descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		goto out_free_descs;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		dma_addr_t dma_addr;

		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
		i++;
	}

	if (!size)
		goto out_free_bufs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;

		dma_free_attrs(dev->dev, size, bufs[i],
			       le64_to_cpu(descs[i].addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(bufs);
out_free_descs:
	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
			descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
}

static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	u32 chunk_size;

	/* start big and work our way down */
	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	     chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}

	return -ENOMEM;
}

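/*
 * HMPRE/HMMIN (in 4KiB units) give the controller's preferred and
 * minimum host memory buffer sizes; both are clamped by the
 * max_host_mem_size_mb module parameter before the buffer is allocated
 * and handed to the controller via Set Features.
 */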
static int nvme_setup_host_mem(struct nvme_dev *dev)
{
	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
	u64 min = (u64)dev->ctrl.hmmin * 4096;
	u32 enable_bits = NVME_HOST_MEM_ENABLE;
	int ret;

	preferred = min(preferred, max);
	if (min > max) {
		dev_warn(dev->ctrl.device,
			"min host memory (%lld MiB) above limit (%d MiB).\n",
			min >> ilog2(SZ_1M), max_host_mem_size_mb);
		nvme_free_host_mem(dev);
		return 0;
	}

	/*
	 * If we already have a buffer allocated check if we can reuse it.
	 */
	if (dev->host_mem_descs) {
		if (dev->host_mem_size >= min)
			enable_bits |= NVME_HOST_MEM_RETURN;
		else
			nvme_free_host_mem(dev);
	}

	if (!dev->host_mem_descs) {
		if (nvme_alloc_host_mem(dev, min, preferred)) {
			dev_warn(dev->ctrl.device,
				"failed to allocate host memory buffer.\n");
			return 0; /* controller must work without HMB */
		}

		dev_info(dev->ctrl.device,
			"allocated %lld MiB host memory buffer.\n",
			dev->host_mem_size >> ilog2(SZ_1M));
	}

	ret = nvme_set_host_mem(dev, enable_bits);
	if (ret)
		nvme_free_host_mem(dev);
	return ret;
}

/*
 * nirqs is the number of interrupts available for write and read
 * queues. The core already reserved an interrupt for the admin queue.
 */
static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
	struct nvme_dev *dev = affd->priv;
	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;

	/*
	 * If there is no interrupt available for queues, ensure that
	 * the default queue is set to 1. The affinity set size is
	 * also set to one, but the irq core ignores it for this case.
	 *
	 * If only one interrupt is available or 'write_queues' == 0, combine
	 * write and read queues.
	 *
	 * If 'write_queues' > 0, ensure it leaves room for at least one read
	 * queue.
	 */
	if (!nrirqs) {
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !nr_write_queues) {
		nr_read_queues = 0;
	} else if (nr_write_queues >= nrirqs) {
		nr_read_queues = 1;
	} else {
		nr_read_queues = nrirqs - nr_write_queues;
	}

	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
	affd->nr_sets = nr_read_queues ? 2 : 1;
}

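/*
 * .pre_vectors reserves vector 0 for the admin queue; the remaining
 * vectors are spread over the default and read queue sets by
 * nvme_calc_irq_sets() above.
 */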
static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct irq_affinity affd = {
		.pre_vectors	= 1,
		.calc_sets	= nvme_calc_irq_sets,
		.priv		= dev,
	};
	unsigned int irq_queues, this_p_queues;

	/*
	 * Poll queues don't need interrupts, but we need at least one IO
	 * queue left over for non-polled IO.
	 */
	this_p_queues = dev->nr_poll_queues;
	if (this_p_queues >= nr_io_queues) {
		this_p_queues = nr_io_queues - 1;
		irq_queues = 1;
	} else {
		irq_queues = nr_io_queues - this_p_queues + 1;
	}
	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;

	/* Initialize for the single interrupt case */
	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
	dev->io_queues[HCTX_TYPE_READ] = 0;

	/*
	 * Some Apple controllers require all queues to use the
	 * first vector.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
		irq_queues = 1;

	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}

static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
}

static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
{
	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned int nr_io_queues;
	unsigned long size;
	int result;

	/*
	 * Sample the module parameters once at reset time so that we have
	 * stable values to work with.
	 */
	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;

	/*
	 * If tags are shared with admin queue (Apple bug), then
	 * make sure we only use one IO queue.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
		nr_io_queues = 1;
	else
		nr_io_queues = min(nvme_max_io_queues(dev),
				   dev->nr_allocated_queues - 1);

	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	clear_bit(NVMEQ_ENABLED, &adminq->flags);

	if (dev->cmb_use_sqes) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			dev->cmb_use_sqes = false;
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues)
			return -ENOMEM;
	} while (1);
	adminq->q_db = dev->dbs;

 retry:
	/* Deregister the admin queue's interrupt */
	pci_free_irq(pdev, 0, adminq);

	/*
	 * If we enable msix early due to not intx, disable it again before
	 * setting up the full range we need.
	 */
	pci_free_irq_vectors(pdev);

	result = nvme_setup_irqs(dev, nr_io_queues);
	if (result <= 0)
		return -EIO;

	dev->num_vecs = result;
	result = max(result - 1, 1);
	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	result = queue_request_irq(adminq);
	if (result)
		return result;
	set_bit(NVMEQ_ENABLED, &adminq->flags);

	result = nvme_create_io_queues(dev);
	if (result || dev->online_queues < 2)
		return result;

	if (dev->online_queues - 1 < dev->max_qid) {
		nr_io_queues = dev->online_queues - 1;
		nvme_disable_io_queues(dev);
		nvme_suspend_io_queues(dev);
		goto retry;
	}
	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
					dev->io_queues[HCTX_TYPE_DEFAULT],
					dev->io_queues[HCTX_TYPE_READ],
					dev->io_queues[HCTX_TYPE_POLL]);
	return 0;
}

static void nvme_del_queue_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->delete_done);
}

static void nvme_del_cq_end(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (error)
		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	nvme_del_queue_end(req, error);
}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = nvmeq;

	init_completion(&nvmeq->delete_done);
	blk_execute_rq_nowait(q, NULL, req, false,
			opcode == nvme_admin_delete_cq ?
				nvme_del_cq_end : nvme_del_queue_end);
	return 0;
}

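/*
 * Fire off delete commands for all online I/O queues without waiting,
 * then reap the completions; returns false if the controller stopped
 * answering within ADMIN_TIMEOUT.
 */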
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
{
	int nr_queues = dev->online_queues - 1, sent = 0;
	unsigned long timeout;

 retry:
	timeout = ADMIN_TIMEOUT;
	while (nr_queues > 0) {
		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
			break;
		nr_queues--;
		sent++;
	}
	while (sent) {
		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];

		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
				timeout);
		if (timeout == 0)
			return false;

		sent--;
		if (nr_queues)
			goto retry;
	}
	return true;
}

static void nvme_dev_add(struct nvme_dev *dev)
{
	int ret;

	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.nr_maps = 2; /* default + read */
		if (dev->io_queues[HCTX_TYPE_POLL])
			dev->tagset.nr_maps++;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev_to_node(dev->dev);
		dev->tagset.queue_depth =
				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = sizeof(struct nvme_iod);
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;

		/*
		 * Some Apple controllers require tags to be unique
		 * across admin and IO queue, so reserve the first 32
		 * tags of the IO queue.
		 */
		if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
			dev->tagset.reserved_tags = NVME_AQ_DEPTH;

		ret = blk_mq_alloc_tag_set(&dev->tagset);
		if (ret) {
			dev_warn(dev->ctrl.device,
				"IO queues tagset allocation failed %d\n", ret);
			return;
		}
		dev->ctrl.tagset = &dev->tagset;
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}

	nvme_dbbuf_set(dev);
}

static int nvme_pci_enable(struct nvme_dev *dev)
{
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (result < 0)
		return result;

	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Some Apple controllers require a non-standard SQE size.
	 * Interestingly they also seem to ignore the CC:IOSQES register
	 * so we don't bother updating it here.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
		dev->io_sqes = 7;
	else
		dev->io_sqes = NVME_NVM_IOSQES;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
			"set queue depth=%u to work around controller resets\n",
			dev->q_depth);
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
			"set queue depth=%u\n", dev->q_depth);
	}

	/*
	 * Controllers with the shared tags quirk need the IO queue to be
	 * big enough so that we get 32 tags for the admin queue
	 */
	if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
	    (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
		dev->q_depth = NVME_AQ_DEPTH + 2;
		dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
			 dev->q_depth);
	}

	nvme_map_cmb(dev);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);
	return 0;

 disable:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));
}
static void nvme_pci_disable(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	pci_free_irq_vectors(pdev);

	if (pci_is_enabled(pdev)) {
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}
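
/*
 * Tear the controller down, either for a clean shutdown (shutdown=true,
 * letting in-flight I/O drain first) or in preparation for a reset. All
 * outstanding requests are cancelled once the hardware is disabled.
 */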
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	bool dead = true, freeze = false;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(pdev)) {
		u32 csts = readl(dev->bar + NVME_REG_CSTS);

		if (dev->ctrl.state == NVME_CTRL_LIVE ||
		    dev->ctrl.state == NVME_CTRL_RESETTING) {
			freeze = true;
			nvme_start_freeze(&dev->ctrl);
		}
		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
			pdev->error_state != pci_channel_io_normal);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead && shutdown && freeze)
		nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);

	nvme_stop_queues(&dev->ctrl);

	if (!dead && dev->ctrl.queue_count > 0) {
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
	nvme_suspend_io_queues(dev);
	nvme_suspend_queue(&dev->queues[0]);
	nvme_pci_disable(dev);
	nvme_reap_pending_cqes(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_wait_completed_request(&dev->tagset);
	blk_mq_tagset_wait_completed_request(&dev->admin_tagset);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_start_queues(&dev->ctrl);
		if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
			blk_mq_unquiesce_queue(dev->ctrl.admin_q);
	}
	mutex_unlock(&dev->shutdown_lock);
}
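
/*
 * Disable the controller once any pending reset has settled. Returns
 * -EBUSY if nvme_wait_reset() reports that this is not possible.
 */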
static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
{
	if (!nvme_wait_reset(&dev->ctrl))
		return -EBUSY;
	nvme_dev_disable(dev, shutdown);
	return 0;
}
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}
static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}
static void nvme_free_tagset(struct nvme_dev *dev)
{
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	dev->ctrl.tagset = NULL;
}
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	nvme_dbbuf_dma_free(dev);
	nvme_free_tagset(dev);
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	free_opal_dev(dev->ctrl.opal_dev);
	mempool_destroy(dev->iod_mempool);
	put_device(dev->dev);
	kfree(dev->queues);
	kfree(dev);
}
static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
{
	/*
	 * Set state to deleting now to avoid blocking nvme_wait_reset(), which
	 * may be holding this pci_dev's device lock.
	 */
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	nvme_get_ctrl(&dev->ctrl);
	nvme_dev_disable(dev, false);
	nvme_kill_queues(&dev->ctrl);
	if (!queue_work(nvme_wq, &dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);
}
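
/*
 * The controller (re)initialization path, run from ctrl.reset_work: shut
 * down a live controller, re-enable the PCI function and admin queue,
 * re-identify the controller, then bring up the I/O queues. Any failure
 * lands in nvme_remove_dead_ctrl().
 */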
static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev =
		container_of(work, struct nvme_dev, ctrl.reset_work);
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result;

	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
		result = -ENODEV;
		goto out;
	}

	/*
	 * If we're called to reset a live controller first shut it down before
	 * we try to restart it.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);
	nvme_sync_queues(&dev->ctrl);

	mutex_lock(&dev->shutdown_lock);
	result = nvme_pci_enable(dev);
	if (result)
		goto out_unlock;

	result = nvme_pci_configure_admin_queue(dev);
	if (result)
		goto out_unlock;

	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out_unlock;

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	dev->ctrl.max_hw_sectors = min_t(u32,
		NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
	dev->ctrl.max_segments = NVME_MAX_SEGS;

	/*
	 * Don't limit the IOMMU merged segment size.
	 */
	dma_set_max_seg_size(dev->dev, 0xffffffff);

	mutex_unlock(&dev->shutdown_lock);

	/*
	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
	 * initializing procedure here.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out;
	}

	/*
	 * We do not support an SGL for metadata (yet), so we are limited to a
	 * single integrity segment for the separate metadata pointer.
	 */
	dev->ctrl.max_integrity_segments = 1;

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
		if (!dev->ctrl.opal_dev)
			dev->ctrl.opal_dev =
				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
		else if (was_suspend)
			opal_unlock_from_suspend(dev->ctrl.opal_dev);
	} else {
		free_opal_dev(dev->ctrl.opal_dev);
		dev->ctrl.opal_dev = NULL;
	}

	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
		result = nvme_dbbuf_dma_alloc(dev);
		if (result)
			dev_warn(dev->dev,
				 "unable to allocate dma for dbbuf\n");
	}

	if (dev->ctrl.hmpre) {
		result = nvme_setup_host_mem(dev);
		if (result < 0)
			goto out;
	}

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * Keep the controller around but remove all namespaces if we don't
	 * have any working I/O queue.
	 */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		nvme_free_tagset(dev);
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		nvme_dev_add(dev);
		nvme_unfreeze(&dev->ctrl);
	}

	/*
	 * If only admin queue live, keep it to do further investigation or
	 * recovery.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller live state\n");
		result = -ENODEV;
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

 out_unlock:
	mutex_unlock(&dev->shutdown_lock);
 out:
	if (result)
		dev_warn(dev->ctrl.device,
			 "Removing after probe failure status: %d\n", result);
	nvme_remove_dead_ctrl(dev);
}
static void nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_get_drvdata(pdev))
		device_release_driver(&pdev->dev);
	nvme_put_ctrl(&dev->ctrl);
}
static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}
static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}
static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}
static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.name			= "pcie",
	.module			= THIS_MODULE,
	.flags			= NVME_F_METADATA_SUPPORTED |
				  NVME_F_PCI_P2PDMA,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.free_ctrl		= nvme_pci_free_ctrl,
	.submit_async_event	= nvme_pci_submit_async_event,
	.get_address		= nvme_pci_get_address,
};
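
/*
 * Note that the core drives all register access through the ops above (for
 * example when enabling the controller and polling CSTS for readiness),
 * which keeps the generic code transport-agnostic.
 */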
static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
 release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}
static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/*
		 * Several Samsung devices seem to drop off the PCIe bus
		 * randomly when APST is on and uses the deepest sleep state.
		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
		 * 950 PRO 256GB", but it seems to be restricted to two Dell
		 * laptops.
		 */
		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
			return NVME_QUIRK_NO_DEEPEST_PS;
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
		 * within a few minutes after bootup on a Coffee Lake board -
		 * ASUS PRIME Z370-A.
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
			return NVME_QUIRK_NO_APST;
	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
		/*
		 * Force host managed nvme power settings for lowest idle
		 * power with quick resume latency on Samsung and Toshiba
		 * SSDs, based on suspend behavior on a Coffee Lake board
		 * for the LENOVO C640.
		 */
		if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
		    dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
			return NVME_QUIRK_SIMPLE_SUSPEND;
	}

	return 0;
}
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
	struct nvme_dev *dev = data;

	flush_work(&dev->ctrl.reset_work);
	flush_work(&dev->ctrl.scan_work);
	nvme_put_ctrl(&dev->ctrl);
}
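
/*
 * Probe allocates the per-device state and registers with the core, then
 * schedules the initial reset and finishes asynchronously so that a slow
 * controller does not stall the boot process.
 */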
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int node, result = -ENOMEM;
	struct nvme_dev *dev;
	unsigned long quirks = id->driver_data;
	size_t alloc_size;

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, first_memory_node);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return -ENOMEM;

	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;
	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
	dev->queues = kcalloc_node(dev->nr_allocated_queues,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto free;

	dev->dev = get_device(&pdev->dev);
	pci_set_drvdata(pdev, dev);

	result = nvme_dev_map(dev);
	if (result)
		goto put_pci;

	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	mutex_init(&dev->shutdown_lock);

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto unmap;

	quirks |= check_vendor_combination_bug(pdev);

	/*
	 * Double check that our mempool alloc size will cover the biggest
	 * command we support.
	 */
	alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
						NVME_MAX_SEGS, true);
	WARN_ON_ONCE(alloc_size > PAGE_SIZE);

	dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
						mempool_kfree,
						(void *) alloc_size,
						GFP_KERNEL, node);
	if (!dev->iod_mempool) {
		result = -ENOMEM;
		goto release_pools;
	}

	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			quirks);
	if (result)
		goto release_mempool;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	nvme_reset_ctrl(&dev->ctrl);
	async_schedule(nvme_async_probe, dev);

	return 0;

 release_mempool:
	mempool_destroy(dev->iod_mempool);
 release_pools:
	nvme_release_prp_pools(dev);
 unmap:
	nvme_dev_unmap(dev);
 put_pci:
	put_device(dev->dev);
 free:
	kfree(dev->queues);
	kfree(dev);
	return result;
}
static void nvme_reset_prepare(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * We don't need to check the return value from waiting for the reset
	 * state as pci_dev device lock is held, making it impossible to race
	 * with ->remove().
	 */
	nvme_disable_prepare_reset(dev, false);
	nvme_sync_queues(&dev->ctrl);
}
static void nvme_reset_done(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	if (!nvme_try_sched_reset(&dev->ctrl))
		flush_work(&dev->ctrl.reset_work);
}
static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_disable_prepare_reset(dev, true);
}
/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, true);
		nvme_dev_remove_admin(dev);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	nvme_dev_disable(dev, true);
	nvme_release_cmb(dev);
	nvme_free_host_mem(dev);
	nvme_dev_remove_admin(dev);
	nvme_free_queues(dev, 0);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_uninit_ctrl(&dev->ctrl);
}
#ifdef CONFIG_PM_SLEEP
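/*
 * Suspend/resume support: the controller's power state is saved and
 * restored through the NVMe Power Management feature (Set/Get Features).
 */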
static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
{
	return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
}

static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
{
	return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
}
static int nvme_resume(struct device *dev)
{
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
	struct nvme_ctrl *ctrl = &ndev->ctrl;

	if (ndev->last_ps == U32_MAX ||
	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
		return nvme_try_sched_reset(&ndev->ctrl);
	return 0;
}
static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);
	struct nvme_ctrl *ctrl = &ndev->ctrl;
	int ret = -EBUSY;

	ndev->last_ps = U32_MAX;

	/*
	 * The platform does not remove power for a kernel managed suspend so
	 * use host managed nvme power settings for lowest idle power if
	 * possible. This should have quicker resume latency than a full device
	 * shutdown. But if the firmware is involved after the suspend or the
	 * device does not support any non-default power states, shut down the
	 * device fully.
	 *
	 * If ASPM is not enabled for the device, shut down the device and allow
	 * the PCI bus layer to put it into D3 in order to take the PCIe link
	 * down, so as to allow the platform to achieve its minimum low-power
	 * state (which may not be possible if the link is up).
	 */
	if (pm_suspend_via_firmware() || !ctrl->npss ||
	    !pcie_aspm_enabled(pdev) ||
	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
		return nvme_disable_prepare_reset(ndev, true);

	nvme_start_freeze(ctrl);
	nvme_wait_freeze(ctrl);
	nvme_sync_queues(ctrl);

	if (ctrl->state != NVME_CTRL_LIVE)
		goto unfreeze;

	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
	if (ret < 0)
		goto unfreeze;

	/*
	 * A saved state prevents pci pm from generically controlling the
	 * device's power. If we're using protocol specific settings, we don't
	 * want pci interfering.
	 */
	pci_save_state(pdev);

	ret = nvme_set_power_state(ctrl, ctrl->npss);
	if (ret < 0)
		goto unfreeze;

	if (ret) {
		/* discard the saved state */
		pci_load_saved_state(pdev, NULL);

		/*
		 * Clearing npss forces a controller reset on resume. The
		 * correct value will be rediscovered then.
		 */
		ret = nvme_disable_prepare_reset(ndev, true);
		ctrl->npss = 0;
	}
 unfreeze:
	nvme_unfreeze(ctrl);
	return ret;
}
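
/*
 * Simple variants used for the hibernation phases below, where platform
 * power may be removed entirely: do a full shutdown on the way down and
 * schedule a controller reset on the way back up.
 */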
static int nvme_simple_suspend(struct device *dev)
{
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));

	return nvme_disable_prepare_reset(ndev, true);
}
static int nvme_simple_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	return nvme_try_sched_reset(&ndev->ctrl);
}
static const struct dev_pm_ops nvme_dev_pm_ops = {
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.freeze		= nvme_simple_suspend,
	.thaw		= nvme_simple_resume,
	.poweroff	= nvme_simple_suspend,
	.restore	= nvme_simple_resume,
};
#endif /* CONFIG_PM_SLEEP */
static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shutdown the controller to quiesce. The controller will be restarted
	 * after the slot reset through driver's slot_reset callback.
	 */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	nvme_reset_ctrl(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;
}
static void nvme_error_resume(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	flush_work(&dev->ctrl.reset_work);
}
static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_prepare	= nvme_reset_prepare,
	.reset_done	= nvme_reset_done,
};
static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a55),
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
				NVME_QUIRK_MEDIUM_PRIO_SQ |
				NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa821),	/* Samsung PM1725 */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa822),	/* Samsung PM1725a */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LightNVM qemu device */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x1d1d, 0x2601),	/* CNEX Granby */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
	{ PCI_DEVICE(0x10ec, 0x5762),	/* ADATA SX6000LNP */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE(0x1cc1, 0x8201),	/* ADATA SX8200PNP 512GB */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
		.driver_data = NVME_QUIRK_SINGLE_VECTOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
				NVME_QUIRK_128_BYTES_SQES |
				NVME_QUIRK_SHARED_TAGS },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
#ifdef CONFIG_PM_SLEEP
	.driver		= {
		.pm	= &nvme_dev_pm_ops,
	},
#endif
	.sriov_configure = pci_sriov_configure_simple,
	.err_handler	= &nvme_err_handler,
};
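
/*
 * Build-time sanity checks: the admin commands issued by this driver must
 * match the 64-byte NVMe submission queue entry, and the irq affinity core
 * must support the two managed sets (default + read) requested here.
 */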
static int __init nvme_init(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);

	return pci_register_driver(&nvme_driver);
}
static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	flush_workqueue(nvme_wq);
}
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);