5d8762d5 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics RDMA host code.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
f41725bb 10#include <rdma/mr_pool.h>
11#include <linux/err.h>
12#include <linux/string.h>
13#include <linux/atomic.h>
14#include <linux/blk-mq.h>
0b36658c 15#include <linux/blk-mq-rdma.h>
16#include <linux/types.h>
17#include <linux/list.h>
18#include <linux/mutex.h>
19#include <linux/scatterlist.h>
20#include <linux/nvme.h>
21#include <asm/unaligned.h>
22
23#include <rdma/ib_verbs.h>
24#include <rdma/rdma_cm.h>
25#include <linux/nvme-rdma.h>
26
27#include "nvme.h"
28#include "fabrics.h"
29
30
782d820c 31#define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 seconds */
33#define NVME_RDMA_MAX_SEGMENTS 256
34
64a741c1 35#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
37#define NVME_RDMA_DATA_SGL_SIZE \
38 (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
39#define NVME_RDMA_METADATA_SGL_SIZE \
40 (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
41
71102307 42struct nvme_rdma_device {
43 struct ib_device *dev;
44 struct ib_pd *pd;
45 struct kref ref;
46 struct list_head entry;
64a741c1 47 unsigned int num_inline_segments;
48};
49
50struct nvme_rdma_qe {
51 struct ib_cqe cqe;
52 void *data;
53 u64 dma;
54};
55
56struct nvme_rdma_sgl {
57 int nents;
58 struct sg_table sg_table;
59};
60
61struct nvme_rdma_queue;
62struct nvme_rdma_request {
d49187e9 63 struct nvme_request req;
64 struct ib_mr *mr;
65 struct nvme_rdma_qe sqe;
66 union nvme_result result;
67 __le16 status;
68 refcount_t ref;
69 struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
70 u32 num_sge;
71 struct ib_reg_wr reg_wr;
72 struct ib_cqe reg_cqe;
73 struct nvme_rdma_queue *queue;
324d9e78 74 struct nvme_rdma_sgl data_sgl;
75 struct nvme_rdma_sgl *metadata_sgl;
76 bool use_sig_mr;
77};
78
79enum nvme_rdma_queue_flags {
80 NVME_RDMA_Q_ALLOCATED = 0,
81 NVME_RDMA_Q_LIVE = 1,
eb1bd249 82 NVME_RDMA_Q_TR_READY = 2,
83};
84
85struct nvme_rdma_queue {
86 struct nvme_rdma_qe *rsp_ring;
87 int queue_size;
88 size_t cmnd_capsule_len;
89 struct nvme_rdma_ctrl *ctrl;
90 struct nvme_rdma_device *device;
91 struct ib_cq *ib_cq;
92 struct ib_qp *qp;
93
94 unsigned long flags;
95 struct rdma_cm_id *cm_id;
96 int cm_error;
97 struct completion cm_done;
5ec5d3bd 98 bool pi_support;
287f329e 99 int cq_size;
7674073b 100 struct mutex queue_lock;
101};
102
103struct nvme_rdma_ctrl {
104 /* read only in the hot path */
105 struct nvme_rdma_queue *queues;
106
107 /* other member variables */
71102307 108 struct blk_mq_tag_set tag_set;
109 struct work_struct err_work;
110
111 struct nvme_rdma_qe async_event_sqe;
112
113 struct delayed_work reconnect_work;
114
115 struct list_head list;
116
117 struct blk_mq_tag_set admin_tag_set;
118 struct nvme_rdma_device *device;
119
120 u32 max_fr_pages;
121
122 struct sockaddr_storage addr;
123 struct sockaddr_storage src_addr;
124
125 struct nvme_ctrl ctrl;
64a741c1 126 bool use_inline_data;
b1064d3e 127 u32 io_queues[HCTX_MAX_TYPES];
128};
129
130static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
131{
132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
133}
134
135static LIST_HEAD(device_list);
136static DEFINE_MUTEX(device_list_mutex);
137
138static LIST_HEAD(nvme_rdma_ctrl_list);
139static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
140
141/*
 142 * Disabling this option makes small I/O go faster, but is fundamentally
143 * unsafe. With it turned off we will have to register a global rkey that
144 * allows read and write access to all physical memory.
145 */
146static bool register_always = true;
147module_param(register_always, bool, 0444);
148MODULE_PARM_DESC(register_always,
149 "Use memory registration even for contiguous memory regions");
150
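/*
 * Illustration (not part of the original source): register_always is a
 * read-only module parameter (mode 0444), so it can only be set when the
 * module is loaded, for example hypothetically:
 *
 *	modprobe nvme_rdma register_always=N
 *
 * Leaving it at the default (true) keeps every payload behind a freshly
 * registered MR instead of exposing a global rkey for all of memory.
 */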
151static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
152 struct rdma_cm_event *event);
153static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
ff029451 154static void nvme_rdma_complete_rq(struct request *rq);
71102307 155
156static const struct blk_mq_ops nvme_rdma_mq_ops;
157static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
158
159static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
160{
161 return queue - queue->ctrl->queues;
162}
163
164static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
165{
166 return nvme_rdma_queue_idx(queue) >
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
168 queue->ctrl->io_queues[HCTX_TYPE_READ];
169}
170
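/*
 * Queue numbering, as used by the two helpers above: queues[0] is the admin
 * queue; I/O queues follow and are handed out in HCTX_TYPE_DEFAULT,
 * HCTX_TYPE_READ, HCTX_TYPE_POLL order (see nvme_rdma_alloc_io_queues()).
 * An index beyond the default + read counts therefore identifies a polling
 * queue, which is what nvme_rdma_poll_queue() checks.
 */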
171static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
172{
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
174}
175
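/*
 * For an I/O queue the capsule length is set to ioccsz * 16 in
 * nvme_rdma_alloc_queue(), so the helper above yields
 * ioccsz * 16 - sizeof(struct nvme_command). With a hypothetical ioccsz of
 * 8 that is 128 - 64 = 64 bytes available for in-capsule (inline) data;
 * the admin queue capsule carries no inline data at all.
 */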
176static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
177 size_t capsule_size, enum dma_data_direction dir)
178{
179 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
180 kfree(qe->data);
181}
182
183static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
184 size_t capsule_size, enum dma_data_direction dir)
185{
186 qe->data = kzalloc(capsule_size, GFP_KERNEL);
187 if (!qe->data)
188 return -ENOMEM;
189
190 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
191 if (ib_dma_mapping_error(ibdev, qe->dma)) {
192 kfree(qe->data);
6344d02d 193 qe->data = NULL;
194 return -ENOMEM;
195 }
196
197 return 0;
198}
199
200static void nvme_rdma_free_ring(struct ib_device *ibdev,
201 struct nvme_rdma_qe *ring, size_t ib_queue_size,
202 size_t capsule_size, enum dma_data_direction dir)
203{
204 int i;
205
206 for (i = 0; i < ib_queue_size; i++)
207 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
208 kfree(ring);
209}
210
211static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
212 size_t ib_queue_size, size_t capsule_size,
213 enum dma_data_direction dir)
214{
215 struct nvme_rdma_qe *ring;
216 int i;
217
218 ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
219 if (!ring)
220 return NULL;
221
222 /*
223 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
 224 * lifetime. It's safe, since any change in the underlying RDMA device
225 * will issue error recovery and queue re-creation.
226 */
227 for (i = 0; i < ib_queue_size; i++) {
228 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
229 goto out_free_ring;
230 }
231
232 return ring;
233
234out_free_ring:
235 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
236 return NULL;
237}
238
239static void nvme_rdma_qp_event(struct ib_event *event, void *context)
240{
241 pr_debug("QP event %s (%d)\n",
242 ib_event_msg(event->event), event->event);
243
244}
245
246static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
247{
248 int ret;
249
250 ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
71102307 251 msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
252 if (ret < 0)
253 return ret;
254 if (ret == 0)
255 return -ETIMEDOUT;
256 WARN_ON_ONCE(queue->cm_error > 0);
257 return queue->cm_error;
258}
259
260static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
261{
262 struct nvme_rdma_device *dev = queue->device;
263 struct ib_qp_init_attr init_attr;
264 int ret;
265
266 memset(&init_attr, 0, sizeof(init_attr));
267 init_attr.event_handler = nvme_rdma_qp_event;
268 /* +1 for drain */
269 init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
270 /* +1 for drain */
271 init_attr.cap.max_recv_wr = queue->queue_size + 1;
272 init_attr.cap.max_recv_sge = 1;
64a741c1 273 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
274 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
275 init_attr.qp_type = IB_QPT_RC;
276 init_attr.send_cq = queue->ib_cq;
277 init_attr.recv_cq = queue->ib_cq;
278 if (queue->pi_support)
279 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
287f329e 280 init_attr.qp_context = queue;
281
282 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
283
284 queue->qp = queue->cm_id->qp;
285 return ret;
286}
287
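/*
 * Work request accounting for the QP created above: each request may post
 * an MR registration, a SEND and a local invalidate (send_wr_factor == 3
 * at the call site), plus one extra slot for draining the queue; the
 * receive side only needs one recv per ring element plus the drain slot,
 * and the send side allows one SGE for the command plus the device's
 * inline data segments.
 */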
288static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
289 struct request *rq, unsigned int hctx_idx)
290{
291 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307 292
62f99b62 293 kfree(req->sqe.data);
294}
295
296static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
297 struct request *rq, unsigned int hctx_idx,
298 unsigned int numa_node)
71102307 299{
385475ee 300 struct nvme_rdma_ctrl *ctrl = set->driver_data;
71102307 301 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
385475ee 302 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
71102307 303 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
71102307 304
59e29ce6 305 nvme_req(rq)->ctrl = &ctrl->ctrl;
306 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
307 if (!req->sqe.data)
308 return -ENOMEM;
71102307 309
310 /* metadata nvme_rdma_sgl struct is located after command's data SGL */
311 if (queue->pi_support)
312 req->metadata_sgl = (void *)nvme_req(rq) +
313 sizeof(struct nvme_rdma_request) +
314 NVME_RDMA_DATA_SGL_SIZE;
315
71102307 316 req->queue = queue;
f4b9e6c9 317 nvme_req(rq)->cmd = req->sqe.data;
318
319 return 0;
320}
321
322static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
323 unsigned int hctx_idx)
324{
325 struct nvme_rdma_ctrl *ctrl = data;
326 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
327
d858e5f0 328 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
329
330 hctx->driver_data = queue;
331 return 0;
332}
333
334static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
335 unsigned int hctx_idx)
336{
337 struct nvme_rdma_ctrl *ctrl = data;
338 struct nvme_rdma_queue *queue = &ctrl->queues[0];
339
340 BUG_ON(hctx_idx != 0);
341
342 hctx->driver_data = queue;
343 return 0;
344}
345
346static void nvme_rdma_free_dev(struct kref *ref)
347{
348 struct nvme_rdma_device *ndev =
349 container_of(ref, struct nvme_rdma_device, ref);
350
351 mutex_lock(&device_list_mutex);
352 list_del(&ndev->entry);
353 mutex_unlock(&device_list_mutex);
354
71102307 355 ib_dealloc_pd(ndev->pd);
356 kfree(ndev);
357}
358
359static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
360{
361 kref_put(&dev->ref, nvme_rdma_free_dev);
362}
363
364static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
365{
366 return kref_get_unless_zero(&dev->ref);
367}
368
369static struct nvme_rdma_device *
370nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
371{
372 struct nvme_rdma_device *ndev;
373
374 mutex_lock(&device_list_mutex);
375 list_for_each_entry(ndev, &device_list, entry) {
376 if (ndev->dev->node_guid == cm_id->device->node_guid &&
377 nvme_rdma_dev_get(ndev))
378 goto out_unlock;
379 }
380
381 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
382 if (!ndev)
383 goto out_err;
384
385 ndev->dev = cm_id->device;
386 kref_init(&ndev->ref);
387
388 ndev->pd = ib_alloc_pd(ndev->dev,
389 register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
390 if (IS_ERR(ndev->pd))
391 goto out_free_dev;
392
393 if (!(ndev->dev->attrs.device_cap_flags &
394 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
395 dev_err(&ndev->dev->dev,
396 "Memory registrations not supported.\n");
11975e01 397 goto out_free_pd;
398 }
399
64a741c1 400 ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
0a3173a5 401 ndev->dev->attrs.max_send_sge - 1);
402 list_add(&ndev->entry, &device_list);
403out_unlock:
404 mutex_unlock(&device_list_mutex);
405 return ndev;
406
407out_free_pd:
408 ib_dealloc_pd(ndev->pd);
409out_free_dev:
410 kfree(ndev);
411out_err:
412 mutex_unlock(&device_list_mutex);
413 return NULL;
414}
415
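/*
 * nvme_rdma_find_get_device() above caches one nvme_rdma_device per
 * underlying ib_device (matched by node_guid) under device_list_mutex.
 * The PD is allocated with IB_PD_UNSAFE_GLOBAL_RKEY only when
 * register_always is disabled, and devices without
 * IB_DEVICE_MEM_MGT_EXTENSIONS are rejected because the driver relies on
 * fast-registration MRs.
 */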
416static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
417{
418 if (nvme_rdma_poll_queue(queue))
419 ib_free_cq(queue->ib_cq);
420 else
421 ib_cq_pool_put(queue->ib_cq, queue->cq_size);
422}
423
424static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
425{
426 struct nvme_rdma_device *dev;
427 struct ib_device *ibdev;
428
429 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
430 return;
431
432 dev = queue->device;
433 ibdev = dev->dev;
71102307 434
435 if (queue->pi_support)
436 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
437 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
438
439 /*
 440 * The cm_id object might have been destroyed during the RDMA connection
 441 * establishment error flow in order to avoid getting further cma events;
 442 * thus the QP must be destroyed directly rather than through the rdma_cm API.
443 */
444 ib_destroy_qp(queue->qp);
287f329e 445 nvme_rdma_free_cq(queue);
446
447 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
448 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
449
450 nvme_rdma_dev_put(dev);
451}
452
5ec5d3bd 453static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
f41725bb 454{
455 u32 max_page_list_len;
456
457 if (pi_support)
458 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
459 else
460 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
461
462 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
463}
464
465static int nvme_rdma_create_cq(struct ib_device *ibdev,
466 struct nvme_rdma_queue *queue)
467{
468 int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
469 enum ib_poll_context poll_ctx;
470
471 /*
 472 * Spread I/O queue completion vectors according to their queue index.
473 * Admin queues can always go on completion vector 0.
474 */
475 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
476
477 /* Polling queues need direct cq polling context */
478 if (nvme_rdma_poll_queue(queue)) {
479 poll_ctx = IB_POLL_DIRECT;
480 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
481 comp_vector, poll_ctx);
482 } else {
483 poll_ctx = IB_POLL_SOFTIRQ;
484 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
485 comp_vector, poll_ctx);
486 }
487
488 if (IS_ERR(queue->ib_cq)) {
489 ret = PTR_ERR(queue->ib_cq);
490 return ret;
491 }
492
493 return 0;
494}
495
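/*
 * CQ selection above: I/O queue index i is spread onto completion vector
 * (i - 1) % num_comp_vectors while the admin queue stays on vector 0.
 * Polling queues get a private IB_POLL_DIRECT CQ via ib_alloc_cq(); all
 * other queues borrow a softirq CQ from the shared pool, which is why
 * nvme_rdma_free_cq() has to pick between ib_free_cq() and
 * ib_cq_pool_put().
 */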
ca6e95bb 496static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
71102307 497{
ca6e95bb 498 struct ib_device *ibdev;
499 const int send_wr_factor = 3; /* MR, SEND, INV */
500 const int cq_factor = send_wr_factor + 1; /* + RECV */
ff13c1b8 501 int ret, pages_per_mr;
71102307 502
503 queue->device = nvme_rdma_find_get_device(queue->cm_id);
504 if (!queue->device) {
505 dev_err(queue->cm_id->device->dev.parent,
506 "no client data found!\n");
507 return -ECONNREFUSED;
508 }
509 ibdev = queue->device->dev;
71102307 510
71102307 511 /* +1 for ib_stop_cq */
512 queue->cq_size = cq_factor * queue->queue_size + 1;
513
514 ret = nvme_rdma_create_cq(ibdev, queue);
515 if (ret)
ca6e95bb 516 goto out_put_dev;
517
518 ret = nvme_rdma_create_qp(queue, send_wr_factor);
519 if (ret)
520 goto out_destroy_ib_cq;
521
522 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
523 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
524 if (!queue->rsp_ring) {
525 ret = -ENOMEM;
526 goto out_destroy_qp;
527 }
528
529 /*
 530 * Currently we don't use SG_GAPS MRs, so if the first entry is
 531 * misaligned we'll end up using two entries for a single data page;
 532 * one additional entry is therefore required.
533 */
5ec5d3bd 534 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
535 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
536 queue->queue_size,
537 IB_MR_TYPE_MEM_REG,
ff13c1b8 538 pages_per_mr, 0);
539 if (ret) {
540 dev_err(queue->ctrl->ctrl.device,
541 "failed to initialize MR pool sized %d for QID %d\n",
287f329e 542 queue->queue_size, nvme_rdma_queue_idx(queue));
543 goto out_destroy_ring;
544 }
545
546 if (queue->pi_support) {
547 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
548 queue->queue_size, IB_MR_TYPE_INTEGRITY,
549 pages_per_mr, pages_per_mr);
550 if (ret) {
551 dev_err(queue->ctrl->ctrl.device,
552 "failed to initialize PI MR pool sized %d for QID %d\n",
287f329e 553 queue->queue_size, nvme_rdma_queue_idx(queue));
554 goto out_destroy_mr_pool;
555 }
556 }
557
558 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
559
560 return 0;
561
562out_destroy_mr_pool:
563 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
564out_destroy_ring:
565 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
566 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
71102307 567out_destroy_qp:
1f61def9 568 rdma_destroy_qp(queue->cm_id);
71102307 569out_destroy_ib_cq:
287f329e 570 nvme_rdma_free_cq(queue);
571out_put_dev:
572 nvme_rdma_dev_put(queue->device);
573 return ret;
574}
575
41e8cfa1 576static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
577 int idx, size_t queue_size)
578{
579 struct nvme_rdma_queue *queue;
8f4e8dac 580 struct sockaddr *src_addr = NULL;
581 int ret;
582
583 queue = &ctrl->queues[idx];
7674073b 584 mutex_init(&queue->queue_lock);
71102307 585 queue->ctrl = ctrl;
586 if (idx && ctrl->ctrl.max_integrity_segments)
587 queue->pi_support = true;
588 else
589 queue->pi_support = false;
590 init_completion(&queue->cm_done);
591
592 if (idx > 0)
593 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
594 else
595 queue->cmnd_capsule_len = sizeof(struct nvme_command);
596
597 queue->queue_size = queue_size;
598
599 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
600 RDMA_PS_TCP, IB_QPT_RC);
601 if (IS_ERR(queue->cm_id)) {
602 dev_info(ctrl->ctrl.device,
603 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
604 ret = PTR_ERR(queue->cm_id);
605 goto out_destroy_mutex;
606 }
607
8f4e8dac 608 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
0928f9b4 609 src_addr = (struct sockaddr *)&ctrl->src_addr;
8f4e8dac 610
611 queue->cm_error = -ETIMEDOUT;
612 ret = rdma_resolve_addr(queue->cm_id, src_addr,
613 (struct sockaddr *)&ctrl->addr,
614 NVME_RDMA_CONNECT_TIMEOUT_MS);
615 if (ret) {
616 dev_info(ctrl->ctrl.device,
617 "rdma_resolve_addr failed (%d).\n", ret);
618 goto out_destroy_cm_id;
619 }
620
621 ret = nvme_rdma_wait_for_cm(queue);
622 if (ret) {
623 dev_info(ctrl->ctrl.device,
d8bfceeb 624 "rdma connection establishment failed (%d)\n", ret);
625 goto out_destroy_cm_id;
626 }
627
5013e98b 628 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
629
630 return 0;
631
632out_destroy_cm_id:
633 rdma_destroy_id(queue->cm_id);
eb1bd249 634 nvme_rdma_destroy_queue_ib(queue);
635out_destroy_mutex:
636 mutex_destroy(&queue->queue_lock);
637 return ret;
638}
639
640static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
641{
642 rdma_disconnect(queue->cm_id);
643 ib_drain_qp(queue->qp);
644}
645
646static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
647{
648 mutex_lock(&queue->queue_lock);
649 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
650 __nvme_rdma_stop_queue(queue);
651 mutex_unlock(&queue->queue_lock);
652}
653
654static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
655{
5013e98b 656 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
657 return;
658
71102307 659 rdma_destroy_id(queue->cm_id);
9817d763 660 nvme_rdma_destroy_queue_ib(queue);
7674073b 661 mutex_destroy(&queue->queue_lock);
662}
663
a57bd541 664static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307 665{
666 int i;
667
668 for (i = 1; i < ctrl->ctrl.queue_count; i++)
669 nvme_rdma_free_queue(&ctrl->queues[i]);
670}
671
a57bd541 672static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
673{
674 int i;
675
d858e5f0 676 for (i = 1; i < ctrl->ctrl.queue_count; i++)
a57bd541 677 nvme_rdma_stop_queue(&ctrl->queues[i]);
678}
679
680static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
681{
ff8519f9 682 struct nvme_rdma_queue *queue = &ctrl->queues[idx];
683 int ret;
684
685 if (idx)
be42a33b 686 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
687 else
688 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
689
d94211b8 690 if (!ret) {
ff8519f9 691 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
d94211b8 692 } else {
693 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
694 __nvme_rdma_stop_queue(queue);
695 dev_info(ctrl->ctrl.device,
696 "failed to connect queue: %d ret=%d\n", idx, ret);
d94211b8 697 }
698 return ret;
699}
700
701static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
702{
703 int i, ret = 0;
704
d858e5f0 705 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
706 ret = nvme_rdma_start_queue(ctrl, i);
707 if (ret)
a57bd541 708 goto out_stop_queues;
709 }
710
711 return 0;
712
a57bd541 713out_stop_queues:
714 for (i--; i >= 1; i--)
715 nvme_rdma_stop_queue(&ctrl->queues[i]);
716 return ret;
717}
718
41e8cfa1 719static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307 720{
c248c643 721 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
0b36658c 722 struct ib_device *ibdev = ctrl->device->dev;
723 unsigned int nr_io_queues, nr_default_queues;
724 unsigned int nr_read_queues, nr_poll_queues;
725 int i, ret;
726
727 nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
728 min(opts->nr_io_queues, num_online_cpus()));
729 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
730 min(opts->nr_write_queues, num_online_cpus()));
731 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
732 nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
b65bb777 733
734 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
735 if (ret)
736 return ret;
737
85032874 738 if (nr_io_queues == 0) {
739 dev_err(ctrl->ctrl.device,
740 "unable to set any I/O queues\n");
741 return -ENOMEM;
742 }
c248c643 743
85032874 744 ctrl->ctrl.queue_count = nr_io_queues + 1;
745 dev_info(ctrl->ctrl.device,
746 "creating %d I/O queues.\n", nr_io_queues);
747
748 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
749 /*
750 * separate read/write queues
751 * hand out dedicated default queues only after we have
752 * sufficient read queues.
753 */
754 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
755 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
756 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
757 min(nr_default_queues, nr_io_queues);
758 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
759 } else {
760 /*
761 * shared read/write queues
762 * either no write queues were requested, or we don't have
763 * sufficient queue count to have dedicated default queues.
764 */
765 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
766 min(nr_read_queues, nr_io_queues);
767 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
768 }
769
770 if (opts->nr_poll_queues && nr_io_queues) {
771 /* map dedicated poll queues only if we have queues left */
772 ctrl->io_queues[HCTX_TYPE_POLL] =
773 min(nr_poll_queues, nr_io_queues);
774 }
775
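	/*
	 * A worked example with hypothetical numbers: 8 online CPUs, 4
	 * completion vectors, nr_io_queues=8, nr_write_queues=2,
	 * nr_poll_queues=2 and a controller that grants all 8 queues gives
	 * nr_read_queues=4, nr_default_queues=2 and nr_poll_queues=2; the
	 * split above then assigns HCTX_TYPE_READ=4, HCTX_TYPE_DEFAULT=2 and
	 * HCTX_TYPE_POLL=2, for a queue_count of 9 including the admin queue.
	 */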
d858e5f0 776 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
777 ret = nvme_rdma_alloc_queue(ctrl, i,
778 ctrl->ctrl.sqsize + 1);
779 if (ret)
71102307 780 goto out_free_queues;
781 }
782
783 return 0;
784
785out_free_queues:
f361e5a0 786 for (i--; i >= 1; i--)
a57bd541 787 nvme_rdma_free_queue(&ctrl->queues[i]);
788
789 return ret;
790}
791
792static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
793 bool admin)
794{
795 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
796 struct blk_mq_tag_set *set;
797 int ret;
798
799 if (admin) {
800 set = &ctrl->admin_tag_set;
801 memset(set, 0, sizeof(*set));
802 set->ops = &nvme_rdma_admin_mq_ops;
38dabe21 803 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ed01fee2 804 set->reserved_tags = NVMF_RESERVED_TAGS;
103e515e 805 set->numa_node = nctrl->numa_node;
b28a308e 806 set->cmd_size = sizeof(struct nvme_rdma_request) +
5ec5d3bd 807 NVME_RDMA_DATA_SGL_SIZE;
808 set->driver_data = ctrl;
809 set->nr_hw_queues = 1;
dc96f938 810 set->timeout = NVME_ADMIN_TIMEOUT;
94f29d4f 811 set->flags = BLK_MQ_F_NO_SCHED;
812 } else {
813 set = &ctrl->tag_set;
814 memset(set, 0, sizeof(*set));
815 set->ops = &nvme_rdma_mq_ops;
5e77d61c 816 set->queue_depth = nctrl->sqsize + 1;
ed01fee2 817 set->reserved_tags = NVMF_RESERVED_TAGS;
103e515e 818 set->numa_node = nctrl->numa_node;
819 set->flags = BLK_MQ_F_SHOULD_MERGE;
820 set->cmd_size = sizeof(struct nvme_rdma_request) +
821 NVME_RDMA_DATA_SGL_SIZE;
822 if (nctrl->max_integrity_segments)
823 set->cmd_size += sizeof(struct nvme_rdma_sgl) +
824 NVME_RDMA_METADATA_SGL_SIZE;
825 set->driver_data = ctrl;
826 set->nr_hw_queues = nctrl->queue_count - 1;
827 set->timeout = NVME_IO_TIMEOUT;
ff8519f9 828 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
829 }
830
831 ret = blk_mq_alloc_tag_set(set);
832 if (ret)
87fd1253 833 return ERR_PTR(ret);
834
835 return set;
836}
837
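/*
 * The cmd_size chosen above matches the per-request layout expected by
 * nvme_rdma_init_request(): struct nvme_rdma_request is followed by the
 * inline scatterlist area (NVME_RDMA_DATA_SGL_SIZE) and, when the
 * controller supports metadata, by a struct nvme_rdma_sgl plus
 * NVME_RDMA_METADATA_SGL_SIZE that req->metadata_sgl points into.
 */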
838static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
839 bool remove)
71102307 840{
841 if (remove) {
842 blk_cleanup_queue(ctrl->ctrl.admin_q);
e7832cb4 843 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
87fd1253 844 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
3f02fffb 845 }
682630f0 846 if (ctrl->async_event_sqe.data) {
925dd04c 847 cancel_work_sync(&ctrl->ctrl.async_event_work);
848 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
849 sizeof(struct nvme_command), DMA_TO_DEVICE);
850 ctrl->async_event_sqe.data = NULL;
851 }
a57bd541 852 nvme_rdma_free_queue(&ctrl->queues[0]);
853}
854
855static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
856 bool new)
90af3512 857{
5ec5d3bd 858 bool pi_capable = false;
859 int error;
860
41e8cfa1 861 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
862 if (error)
863 return error;
864
865 ctrl->device = ctrl->queues[0].device;
22dd4c70 866 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
90af3512 867
868 /* T10-PI support */
869 if (ctrl->device->dev->attrs.device_cap_flags &
870 IB_DEVICE_INTEGRITY_HANDOVER)
871 pi_capable = true;
872
873 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
874 pi_capable);
90af3512 875
876 /*
877 * Bind the async event SQE DMA mapping to the admin queue lifetime.
 878 * It's safe, since any change in the underlying RDMA device will issue
879 * error recovery and queue re-creation.
880 */
881 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
882 sizeof(struct nvme_command), DMA_TO_DEVICE);
883 if (error)
884 goto out_free_queue;
885
886 if (new) {
887 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
888 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
889 error = PTR_ERR(ctrl->ctrl.admin_tagset);
94e42213 890 goto out_free_async_qe;
f04b9cc8 891 }
90af3512 892
893 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
894 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
895 error = PTR_ERR(ctrl->ctrl.fabrics_q);
896 goto out_free_tagset;
897 }
898
899 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
900 if (IS_ERR(ctrl->ctrl.admin_q)) {
901 error = PTR_ERR(ctrl->ctrl.admin_q);
e7832cb4 902 goto out_cleanup_fabrics_q;
3f02fffb 903 }
904 }
905
68e16fcf 906 error = nvme_rdma_start_queue(ctrl, 0);
907 if (error)
908 goto out_cleanup_queue;
909
c0f2f45b 910 error = nvme_enable_ctrl(&ctrl->ctrl);
90af3512 911 if (error)
2e050f00 912 goto out_stop_queue;
90af3512 913
914 ctrl->ctrl.max_segments = ctrl->max_fr_pages;
915 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
916 if (pi_capable)
917 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
918 else
919 ctrl->ctrl.max_integrity_segments = 0;
90af3512 920
921 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
922
f21c4769 923 error = nvme_init_ctrl_finish(&ctrl->ctrl);
90af3512 924 if (error)
958dc1d3 925 goto out_quiesce_queue;
90af3512 926
927 return 0;
928
929out_quiesce_queue:
930 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
931 blk_sync_queue(ctrl->ctrl.admin_q);
932out_stop_queue:
933 nvme_rdma_stop_queue(&ctrl->queues[0]);
958dc1d3 934 nvme_cancel_admin_tagset(&ctrl->ctrl);
90af3512 935out_cleanup_queue:
936 if (new)
937 blk_cleanup_queue(ctrl->ctrl.admin_q);
938out_cleanup_fabrics_q:
939 if (new)
940 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
90af3512 941out_free_tagset:
3f02fffb 942 if (new)
87fd1253 943 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
94e42213 944out_free_async_qe:
945 if (ctrl->async_event_sqe.data) {
946 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
947 sizeof(struct nvme_command), DMA_TO_DEVICE);
948 ctrl->async_event_sqe.data = NULL;
949 }
950out_free_queue:
951 nvme_rdma_free_queue(&ctrl->queues[0]);
952 return error;
953}
954
955static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
956 bool remove)
957{
958 if (remove) {
959 blk_cleanup_queue(ctrl->ctrl.connect_q);
87fd1253 960 blk_mq_free_tag_set(ctrl->ctrl.tagset);
961 }
962 nvme_rdma_free_io_queues(ctrl);
963}
964
965static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
966{
967 int ret;
968
41e8cfa1 969 ret = nvme_rdma_alloc_io_queues(ctrl);
970 if (ret)
971 return ret;
972
973 if (new) {
974 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
975 if (IS_ERR(ctrl->ctrl.tagset)) {
976 ret = PTR_ERR(ctrl->ctrl.tagset);
a57bd541 977 goto out_free_io_queues;
f04b9cc8 978 }
979
980 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
981 if (IS_ERR(ctrl->ctrl.connect_q)) {
982 ret = PTR_ERR(ctrl->ctrl.connect_q);
983 goto out_free_tag_set;
984 }
985 }
986
68e16fcf 987 ret = nvme_rdma_start_io_queues(ctrl);
988 if (ret)
989 goto out_cleanup_connect_q;
990
991 if (!new) {
992 nvme_start_queues(&ctrl->ctrl);
993 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
994 /*
995 * If we timed out waiting for freeze we are likely to
996 * be stuck. Fail the controller initialization just
997 * to be safe.
998 */
999 ret = -ENODEV;
1000 goto out_wait_freeze_timed_out;
1001 }
1002 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
1003 ctrl->ctrl.queue_count - 1);
1004 nvme_unfreeze(&ctrl->ctrl);
1005 }
1006
1007 return 0;
1008
1009out_wait_freeze_timed_out:
1010 nvme_stop_queues(&ctrl->ctrl);
958dc1d3 1011 nvme_sync_io_queues(&ctrl->ctrl);
2362acb6 1012 nvme_rdma_stop_io_queues(ctrl);
a57bd541 1013out_cleanup_connect_q:
958dc1d3 1014 nvme_cancel_tagset(&ctrl->ctrl);
1015 if (new)
1016 blk_cleanup_queue(ctrl->ctrl.connect_q);
1017out_free_tag_set:
1018 if (new)
87fd1253 1019 blk_mq_free_tag_set(ctrl->ctrl.tagset);
1020out_free_io_queues:
1021 nvme_rdma_free_io_queues(ctrl);
1022 return ret;
1023}
1024
1025static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
1026 bool remove)
1027{
1028 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
3017013d 1029 blk_sync_queue(ctrl->ctrl.admin_q);
75862c72 1030 nvme_rdma_stop_queue(&ctrl->queues[0]);
c4189d68 1031 nvme_cancel_admin_tagset(&ctrl->ctrl);
1032 if (remove)
1033 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
1034 nvme_rdma_destroy_admin_queue(ctrl, remove);
1035}
1036
1037static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
1038 bool remove)
1039{
1040 if (ctrl->ctrl.queue_count > 1) {
9f98772b 1041 nvme_start_freeze(&ctrl->ctrl);
75862c72 1042 nvme_stop_queues(&ctrl->ctrl);
3017013d 1043 nvme_sync_io_queues(&ctrl->ctrl);
75862c72 1044 nvme_rdma_stop_io_queues(ctrl);
c4189d68 1045 nvme_cancel_tagset(&ctrl->ctrl);
1046 if (remove)
1047 nvme_start_queues(&ctrl->ctrl);
1048 nvme_rdma_destroy_io_queues(ctrl, remove);
1049 }
1050}
1051
1052static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
1053{
1054 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
1055
1056 if (list_empty(&ctrl->list))
1057 goto free_ctrl;
1058
1059 mutex_lock(&nvme_rdma_ctrl_mutex);
1060 list_del(&ctrl->list);
1061 mutex_unlock(&nvme_rdma_ctrl_mutex);
1062
1063 nvmf_free_options(nctrl->opts);
1064free_ctrl:
3d064101 1065 kfree(ctrl->queues);
1066 kfree(ctrl);
1067}
1068
1069static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
1070{
1071 /* If we are resetting/deleting then do nothing */
ad6a0a52 1072 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
1073 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
1074 ctrl->ctrl.state == NVME_CTRL_LIVE);
1075 return;
1076 }
1077
1078 if (nvmf_should_reconnect(&ctrl->ctrl)) {
1079 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
1080 ctrl->ctrl.opts->reconnect_delay);
9a6327d2 1081 queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
1082 ctrl->ctrl.opts->reconnect_delay * HZ);
1083 } else {
12fa1304 1084 nvme_delete_ctrl(&ctrl->ctrl);
1085 }
1086}
1087
c66e2998 1088static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
71102307 1089{
13ce7e62 1090 int ret;
71102307 1091 bool changed;
71102307 1092
c66e2998 1093 ret = nvme_rdma_configure_admin_queue(ctrl, new);
71102307 1094 if (ret)
1095 return ret;
1096
1097 if (ctrl->ctrl.icdoff) {
26e8addf 1098 ret = -EOPNOTSUPP;
1099 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
1100 goto destroy_admin;
1101 }
1102
1103 if (!(ctrl->ctrl.sgls & (1 << 2))) {
26e8addf 1104 ret = -EOPNOTSUPP;
1105 dev_err(ctrl->ctrl.device,
1106 "Mandatory keyed sgls are not supported!\n");
1107 goto destroy_admin;
1108 }
1109
1110 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
1111 dev_warn(ctrl->ctrl.device,
1112 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1113 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
1114 }
1115
1116 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1117 dev_warn(ctrl->ctrl.device,
1118 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1119 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1120 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1121 }
71102307 1122
1123 if (ctrl->ctrl.sgls & (1 << 20))
1124 ctrl->use_inline_data = true;
71102307 1125
d858e5f0 1126 if (ctrl->ctrl.queue_count > 1) {
c66e2998 1127 ret = nvme_rdma_configure_io_queues(ctrl, new);
71102307 1128 if (ret)
5e1fe61d 1129 goto destroy_admin;
1130 }
1131
1132 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
0a960afd 1133 if (!changed) {
96135862 1134 /*
ecca390e 1135 * state change failure is ok if we started ctrl delete,
 1136 * unless we are in the middle of creating a new controller, to
1137 * avoid races with teardown flow.
1138 */
1139 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
1140 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
96135862 1141 WARN_ON_ONCE(new);
1142 ret = -EINVAL;
1143 goto destroy_io;
1144 }
1145
d09f2b45 1146 nvme_start_ctrl(&ctrl->ctrl);
1147 return 0;
1148
1149destroy_io:
1150 if (ctrl->ctrl.queue_count > 1) {
1151 nvme_stop_queues(&ctrl->ctrl);
1152 nvme_sync_io_queues(&ctrl->ctrl);
1153 nvme_rdma_stop_io_queues(ctrl);
1154 nvme_cancel_tagset(&ctrl->ctrl);
c66e2998 1155 nvme_rdma_destroy_io_queues(ctrl, new);
958dc1d3 1156 }
c66e2998 1157destroy_admin:
1158 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
1159 blk_sync_queue(ctrl->ctrl.admin_q);
c66e2998 1160 nvme_rdma_stop_queue(&ctrl->queues[0]);
958dc1d3 1161 nvme_cancel_admin_tagset(&ctrl->ctrl);
1162 nvme_rdma_destroy_admin_queue(ctrl, new);
1163 return ret;
1164}
1165
1166static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
1167{
1168 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
1169 struct nvme_rdma_ctrl, reconnect_work);
1170
1171 ++ctrl->ctrl.nr_reconnects;
1172
1173 if (nvme_rdma_setup_ctrl(ctrl, false))
1174 goto requeue;
71102307 1175
1176 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
1177 ctrl->ctrl.nr_reconnects);
1178
1179 ctrl->ctrl.nr_reconnects = 0;
1180
1181 return;
1182
71102307 1183requeue:
fd8563ce 1184 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
fdf9dfa8 1185 ctrl->ctrl.nr_reconnects);
fd8563ce 1186 nvme_rdma_reconnect_or_remove(ctrl);
1187}
1188
1189static void nvme_rdma_error_recovery_work(struct work_struct *work)
1190{
1191 struct nvme_rdma_ctrl *ctrl = container_of(work,
1192 struct nvme_rdma_ctrl, err_work);
1193
e4d753d7 1194 nvme_stop_keep_alive(&ctrl->ctrl);
60c6dfe5 1195 flush_work(&ctrl->ctrl.async_event_work);
75862c72 1196 nvme_rdma_teardown_io_queues(ctrl, false);
e818a5b4 1197 nvme_start_queues(&ctrl->ctrl);
75862c72 1198 nvme_rdma_teardown_admin_queue(ctrl, false);
e7832cb4 1199 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
e818a5b4 1200
ad6a0a52 1201 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
1202 /* state change failure is ok if we started ctrl delete */
1203 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
1204 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
1205 return;
1206 }
1207
fd8563ce 1208 nvme_rdma_reconnect_or_remove(ctrl);
1209}
1210
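/*
 * Error recovery above runs on nvme_reset_wq after nvme_rdma_error_recovery()
 * has moved the controller to RESETTING: it stops keep-alive, tears down the
 * I/O and admin queues without freeing the tag sets, unquiesces the queues so
 * pending requests can fail over, and then either schedules a reconnect or
 * deletes the controller via nvme_rdma_reconnect_or_remove().
 */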
1211static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
1212{
d5bf4b7f 1213 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
1214 return;
1215
0475a8dc 1216 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
97b2512a 1217 queue_work(nvme_reset_wq, &ctrl->err_work);
1218}
1219
1220static void nvme_rdma_end_request(struct nvme_rdma_request *req)
1221{
1222 struct request *rq = blk_mq_rq_from_pdu(req);
1223
1224 if (!refcount_dec_and_test(&req->ref))
1225 return;
2eb81a33 1226 if (!nvme_try_complete_req(rq, req->status, req->result))
ff029451 1227 nvme_rdma_complete_rq(rq);
1228}
1229
1230static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
1231 const char *op)
1232{
287f329e 1233 struct nvme_rdma_queue *queue = wc->qp->qp_context;
1234 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1235
1236 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
1237 dev_info(ctrl->ctrl.device,
1238 "%s for CQE 0x%p failed with status %s (%d)\n",
1239 op, wc->wr_cqe,
1240 ib_wc_status_msg(wc->status), wc->status);
1241 nvme_rdma_error_recovery(ctrl);
1242}
1243
1244static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
1245{
1246 if (unlikely(wc->status != IB_WC_SUCCESS))
1247 nvme_rdma_wr_error(cq, wc, "MEMREG");
1248}
1249
1250static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1251{
1252 struct nvme_rdma_request *req =
1253 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
2f122e4f 1254
8446546c 1255 if (unlikely(wc->status != IB_WC_SUCCESS))
71102307 1256 nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
1257 else
1258 nvme_rdma_end_request(req);
1259}
1260
1261static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
1262 struct nvme_rdma_request *req)
1263{
1264 struct ib_send_wr wr = {
1265 .opcode = IB_WR_LOCAL_INV,
1266 .next = NULL,
1267 .num_sge = 0,
2f122e4f 1268 .send_flags = IB_SEND_SIGNALED,
1269 .ex.invalidate_rkey = req->mr->rkey,
1270 };
1271
1272 req->reg_cqe.done = nvme_rdma_inv_rkey_done;
1273 wr.wr_cqe = &req->reg_cqe;
1274
45e3cc1a 1275 return ib_post_send(queue->qp, &wr, NULL);
1276}
1277
1278static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
1279 struct request *rq)
1280{
1281 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1282 struct nvme_rdma_device *dev = queue->device;
1283 struct ib_device *ibdev = dev->dev;
5ec5d3bd 1284 struct list_head *pool = &queue->qp->rdma_mrs;
71102307 1285
34e08191 1286 if (!blk_rq_nr_phys_segments(rq))
1287 return;
1288
1289 if (blk_integrity_rq(rq)) {
1290 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1291 req->metadata_sgl->nents, rq_dma_dir(rq));
1292 sg_free_table_chained(&req->metadata_sgl->sg_table,
1293 NVME_INLINE_METADATA_SG_CNT);
1294 }
1295
1296 if (req->use_sig_mr)
1297 pool = &queue->qp->sig_mrs;
1298
f41725bb 1299 if (req->mr) {
5ec5d3bd 1300 ib_mr_pool_put(queue->qp, pool, req->mr);
1301 req->mr = NULL;
1302 }
1303
1304 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1305 rq_dma_dir(rq));
1306 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
1307}
1308
1309static int nvme_rdma_set_sg_null(struct nvme_command *c)
1310{
1311 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1312
1313 sg->addr = 0;
1314 put_unaligned_le24(0, sg->length);
1315 put_unaligned_le32(0, sg->key);
1316 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1317 return 0;
1318}
1319
1320static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
1321 struct nvme_rdma_request *req, struct nvme_command *c,
1322 int count)
1323{
1324 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
64a741c1 1325 struct ib_sge *sge = &req->sge[1];
12b2aaad 1326 struct scatterlist *sgl;
1327 u32 len = 0;
1328 int i;
71102307 1329
12b2aaad 1330 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
1331 sge->addr = sg_dma_address(sgl);
1332 sge->length = sg_dma_len(sgl);
1333 sge->lkey = queue->device->pd->local_dma_lkey;
1334 len += sge->length;
12b2aaad 1335 sge++;
64a741c1 1336 }
1337
1338 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
64a741c1 1339 sg->length = cpu_to_le32(len);
1340 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1341
64a741c1 1342 req->num_sge += count;
1343 return 0;
1344}
1345
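/*
 * Inline mapping above: every data segment becomes an ib_sge using the
 * local DMA lkey, and the command's SGL descriptor is set to
 * NVME_SGL_FMT_DATA_DESC with the OFFSET subtype so the target reads the
 * payload from the capsule at the controller's icdoff. The caller only
 * takes this path for writes that fit the device's inline segment budget.
 */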
1346static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
1347 struct nvme_rdma_request *req, struct nvme_command *c)
1348{
1349 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1350
1351 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
1352 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
11975e01 1353 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
1354 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1355 return 0;
1356}
1357
1358static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
1359 struct nvme_rdma_request *req, struct nvme_command *c,
1360 int count)
1361{
1362 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1363 int nr;
1364
1365 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
1366 if (WARN_ON_ONCE(!req->mr))
1367 return -EAGAIN;
1368
1369 /*
1370 * Align the MR to a 4K page size to match the ctrl page size and
1371 * the block virtual boundary.
1372 */
1373 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
1374 SZ_4K);
a7b7c7a1 1375 if (unlikely(nr < count)) {
1376 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
1377 req->mr = NULL;
1378 if (nr < 0)
1379 return nr;
1380 return -EINVAL;
1381 }
1382
1383 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1384
1385 req->reg_cqe.done = nvme_rdma_memreg_done;
1386 memset(&req->reg_wr, 0, sizeof(req->reg_wr));
1387 req->reg_wr.wr.opcode = IB_WR_REG_MR;
1388 req->reg_wr.wr.wr_cqe = &req->reg_cqe;
1389 req->reg_wr.wr.num_sge = 0;
1390 req->reg_wr.mr = req->mr;
1391 req->reg_wr.key = req->mr->rkey;
1392 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
1393 IB_ACCESS_REMOTE_READ |
1394 IB_ACCESS_REMOTE_WRITE;
1395
1396 sg->addr = cpu_to_le64(req->mr->iova);
1397 put_unaligned_le24(req->mr->length, sg->length);
1398 put_unaligned_le32(req->mr->rkey, sg->key);
1399 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
1400 NVME_SGL_FMT_INVALIDATE;
1401
1402 return 0;
1403}
1404
1405static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
1406 struct nvme_command *cmd, struct ib_sig_domain *domain,
1407 u16 control, u8 pi_type)
1408{
1409 domain->sig_type = IB_SIG_TYPE_T10_DIF;
1410 domain->sig.dif.bg_type = IB_T10DIF_CRC;
1411 domain->sig.dif.pi_interval = 1 << bi->interval_exp;
1412 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
1413 if (control & NVME_RW_PRINFO_PRCHK_REF)
1414 domain->sig.dif.ref_remap = true;
1415
1416 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
1417 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
1418 domain->sig.dif.app_escape = true;
1419 if (pi_type == NVME_NS_DPS_PI_TYPE3)
1420 domain->sig.dif.ref_escape = true;
1421}
1422
1423static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
1424 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
1425 u8 pi_type)
1426{
1427 u16 control = le16_to_cpu(cmd->rw.control);
1428
1429 memset(sig_attrs, 0, sizeof(*sig_attrs));
1430 if (control & NVME_RW_PRINFO_PRACT) {
1431 /* for WRITE_INSERT/READ_STRIP no memory domain */
1432 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
1433 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1434 pi_type);
1435 /* Clear the PRACT bit since HCA will generate/verify the PI */
1436 control &= ~NVME_RW_PRINFO_PRACT;
1437 cmd->rw.control = cpu_to_le16(control);
1438 } else {
1439 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
1440 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1441 pi_type);
1442 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
1443 pi_type);
1444 }
1445}
1446
1447static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
1448{
1449 *mask = 0;
1450 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
1451 *mask |= IB_SIG_CHECK_REFTAG;
1452 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
1453 *mask |= IB_SIG_CHECK_GUARD;
1454}
1455
1456static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
1457{
1458 if (unlikely(wc->status != IB_WC_SUCCESS))
1459 nvme_rdma_wr_error(cq, wc, "SIG");
1460}
1461
1462static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
1463 struct nvme_rdma_request *req, struct nvme_command *c,
1464 int count, int pi_count)
1465{
1466 struct nvme_rdma_sgl *sgl = &req->data_sgl;
1467 struct ib_reg_wr *wr = &req->reg_wr;
1468 struct request *rq = blk_mq_rq_from_pdu(req);
1469 struct nvme_ns *ns = rq->q->queuedata;
1470 struct bio *bio = rq->bio;
1471 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1472 int nr;
1473
1474 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
1475 if (WARN_ON_ONCE(!req->mr))
1476 return -EAGAIN;
1477
1478 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
1479 req->metadata_sgl->sg_table.sgl, pi_count, NULL,
1480 SZ_4K);
1481 if (unlikely(nr))
1482 goto mr_put;
1483
309dca30 1484 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
1485 req->mr->sig_attrs, ns->pi_type);
1486 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
1487
1488 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1489
1490 req->reg_cqe.done = nvme_rdma_sig_done;
1491 memset(wr, 0, sizeof(*wr));
1492 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
1493 wr->wr.wr_cqe = &req->reg_cqe;
1494 wr->wr.num_sge = 0;
1495 wr->wr.send_flags = 0;
1496 wr->mr = req->mr;
1497 wr->key = req->mr->rkey;
1498 wr->access = IB_ACCESS_LOCAL_WRITE |
1499 IB_ACCESS_REMOTE_READ |
1500 IB_ACCESS_REMOTE_WRITE;
1501
1502 sg->addr = cpu_to_le64(req->mr->iova);
1503 put_unaligned_le24(req->mr->length, sg->length);
1504 put_unaligned_le32(req->mr->rkey, sg->key);
1505 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1506
1507 return 0;
1508
1509mr_put:
1510 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
1511 req->mr = NULL;
1512 if (nr < 0)
1513 return nr;
1514 return -EINVAL;
1515}
1516
71102307 1517static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
b131c61d 1518 struct request *rq, struct nvme_command *c)
1519{
1520 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1521 struct nvme_rdma_device *dev = queue->device;
1522 struct ib_device *ibdev = dev->dev;
5ec5d3bd 1523 int pi_count = 0;
f9d03f96 1524 int count, ret;
1525
1526 req->num_sge = 1;
4af7f7ff 1527 refcount_set(&req->ref, 2); /* send and recv completions */
1528
1529 c->common.flags |= NVME_CMD_SGL_METABUF;
1530
34e08191 1531 if (!blk_rq_nr_phys_segments(rq))
1532 return nvme_rdma_set_sg_null(c);
1533
1534 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
1535 ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
1536 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
38e18002 1537 NVME_INLINE_SG_CNT);
1538 if (ret)
1539 return -ENOMEM;
1540
1541 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1542 req->data_sgl.sg_table.sgl);
71102307 1543
1544 count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1545 req->data_sgl.nents, rq_dma_dir(rq));
71102307 1546 if (unlikely(count <= 0)) {
1547 ret = -EIO;
1548 goto out_free_table;
1549 }
1550
1551 if (blk_integrity_rq(rq)) {
1552 req->metadata_sgl->sg_table.sgl =
1553 (struct scatterlist *)(req->metadata_sgl + 1);
1554 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
1555 blk_rq_count_integrity_sg(rq->q, rq->bio),
1556 req->metadata_sgl->sg_table.sgl,
1557 NVME_INLINE_METADATA_SG_CNT);
1558 if (unlikely(ret)) {
1559 ret = -ENOMEM;
1560 goto out_unmap_sg;
1561 }
1562
1563 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
1564 rq->bio, req->metadata_sgl->sg_table.sgl);
1565 pi_count = ib_dma_map_sg(ibdev,
1566 req->metadata_sgl->sg_table.sgl,
1567 req->metadata_sgl->nents,
1568 rq_dma_dir(rq));
1569 if (unlikely(pi_count <= 0)) {
1570 ret = -EIO;
1571 goto out_free_pi_table;
1572 }
1573 }
1574
1575 if (req->use_sig_mr) {
1576 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
1577 goto out;
1578 }
1579
64a741c1 1580 if (count <= dev->num_inline_segments) {
b131c61d 1581 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
64a741c1 1582 queue->ctrl->use_inline_data &&
b131c61d 1583 blk_rq_payload_bytes(rq) <=
94423a8f 1584 nvme_rdma_inline_data_size(queue)) {
64a741c1 1585 ret = nvme_rdma_map_sg_inline(queue, req, c, count);
1586 goto out;
1587 }
71102307 1588
64a741c1 1589 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
1590 ret = nvme_rdma_map_sg_single(queue, req, c);
1591 goto out;
1592 }
1593 }
1594
1595 ret = nvme_rdma_map_sg_fr(queue, req, c, count);
1596out:
1597 if (unlikely(ret))
5ec5d3bd 1598 goto out_unmap_pi_sg;
1599
1600 return 0;
1601
1602out_unmap_pi_sg:
1603 if (blk_integrity_rq(rq))
1604 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1605 req->metadata_sgl->nents, rq_dma_dir(rq));
1606out_free_pi_table:
1607 if (blk_integrity_rq(rq))
1608 sg_free_table_chained(&req->metadata_sgl->sg_table,
1609 NVME_INLINE_METADATA_SG_CNT);
94423a8f 1610out_unmap_sg:
1611 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1612 rq_dma_dir(rq));
94423a8f 1613out_free_table:
324d9e78 1614 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
94423a8f 1615 return ret;
1616}
1617
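/*
 * Mapping strategy in nvme_rdma_map_data() above: a NULL SGL descriptor
 * for zero-length commands, inline data for writes on I/O queues that fit
 * within the inline budget, a single keyed SGL using the PD's unsafe
 * global rkey when only one segment is present and registration is
 * disabled, and otherwise a fast-registration MR (or an integrity MR when
 * req->use_sig_mr is set for PI-capable namespaces).
 */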
1618static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1619{
1620 struct nvme_rdma_qe *qe =
1621 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
1622 struct nvme_rdma_request *req =
1623 container_of(qe, struct nvme_rdma_request, sqe);
4af7f7ff 1624
8446546c 1625 if (unlikely(wc->status != IB_WC_SUCCESS))
71102307 1626 nvme_rdma_wr_error(cq, wc, "SEND");
1627 else
1628 nvme_rdma_end_request(req);
1629}
1630
1631static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1632 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
b4b591c8 1633 struct ib_send_wr *first)
71102307 1634{
45e3cc1a 1635 struct ib_send_wr wr;
1636 int ret;
1637
1638 sge->addr = qe->dma;
a62315b8 1639 sge->length = sizeof(struct nvme_command);
1640 sge->lkey = queue->device->pd->local_dma_lkey;
1641
1642 wr.next = NULL;
1643 wr.wr_cqe = &qe->cqe;
1644 wr.sg_list = sge;
1645 wr.num_sge = num_sge;
1646 wr.opcode = IB_WR_SEND;
b4b591c8 1647 wr.send_flags = IB_SEND_SIGNALED;
1648
1649 if (first)
1650 first->next = &wr;
1651 else
1652 first = &wr;
1653
45e3cc1a 1654 ret = ib_post_send(queue->qp, first, NULL);
a7b7c7a1 1655 if (unlikely(ret)) {
1656 dev_err(queue->ctrl->ctrl.device,
1657 "%s failed with error code %d\n", __func__, ret);
1658 }
1659 return ret;
1660}
1661
1662static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
1663 struct nvme_rdma_qe *qe)
1664{
45e3cc1a 1665 struct ib_recv_wr wr;
1666 struct ib_sge list;
1667 int ret;
1668
1669 list.addr = qe->dma;
1670 list.length = sizeof(struct nvme_completion);
1671 list.lkey = queue->device->pd->local_dma_lkey;
1672
1673 qe->cqe.done = nvme_rdma_recv_done;
1674
1675 wr.next = NULL;
1676 wr.wr_cqe = &qe->cqe;
1677 wr.sg_list = &list;
1678 wr.num_sge = 1;
1679
45e3cc1a 1680 ret = ib_post_recv(queue->qp, &wr, NULL);
a7b7c7a1 1681 if (unlikely(ret)) {
1682 dev_err(queue->ctrl->ctrl.device,
1683 "%s failed with error code %d\n", __func__, ret);
1684 }
1685 return ret;
1686}
1687
1688static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
1689{
1690 u32 queue_idx = nvme_rdma_queue_idx(queue);
1691
1692 if (queue_idx == 0)
1693 return queue->ctrl->admin_tag_set.tags[queue_idx];
1694 return queue->ctrl->tag_set.tags[queue_idx - 1];
1695}
1696
1697static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
1698{
1699 if (unlikely(wc->status != IB_WC_SUCCESS))
1700 nvme_rdma_wr_error(cq, wc, "ASYNC");
1701}
1702
ad22c355 1703static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
1704{
1705 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1706 struct nvme_rdma_queue *queue = &ctrl->queues[0];
1707 struct ib_device *dev = queue->device->dev;
1708 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1709 struct nvme_command *cmd = sqe->data;
1710 struct ib_sge sge;
1711 int ret;
1712
1713 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
1714
1715 memset(cmd, 0, sizeof(*cmd));
1716 cmd->common.opcode = nvme_admin_async_event;
38dabe21 1717 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1718 cmd->common.flags |= NVME_CMD_SGL_METABUF;
1719 nvme_rdma_set_sg_null(cmd);
1720
1721 sqe->cqe.done = nvme_rdma_async_done;
1722
1723 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
1724 DMA_TO_DEVICE);
1725
b4b591c8 1726 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
1727 WARN_ON_ONCE(ret);
1728}
1729
1730static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
1731 struct nvme_completion *cqe, struct ib_wc *wc)
71102307 1732{
71102307
CH
1733 struct request *rq;
1734 struct nvme_rdma_request *req;
71102307 1735
e7006de6 1736 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
71102307
CH
1737 if (!rq) {
1738 dev_err(queue->ctrl->ctrl.device,
e7006de6 1739 "got bad command_id %#x on QP %#x\n",
71102307
CH
1740 cqe->command_id, queue->qp->qp_num);
1741 nvme_rdma_error_recovery(queue->ctrl);
1052b8ac 1742 return;
71102307
CH
1743 }
1744 req = blk_mq_rq_to_pdu(rq);
1745
4af7f7ff
SG
1746 req->status = cqe->status;
1747 req->result = cqe->result;
71102307 1748
3ef0279b 1749 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
a87da50f
CL
1750 if (unlikely(!req->mr ||
1751 wc->ex.invalidate_rkey != req->mr->rkey)) {
3ef0279b
SG
1752 dev_err(queue->ctrl->ctrl.device,
1753 "Bogus remote invalidation for rkey %#x\n",
a87da50f 1754 req->mr ? req->mr->rkey : 0);
3ef0279b
SG
1755 nvme_rdma_error_recovery(queue->ctrl);
1756 }
f41725bb 1757 } else if (req->mr) {
1052b8ac
JA
1758 int ret;
1759
2f122e4f
SG
1760 ret = nvme_rdma_inv_rkey(queue, req);
1761 if (unlikely(ret < 0)) {
1762 dev_err(queue->ctrl->ctrl.device,
1763 "Queueing INV WR for rkey %#x failed (%d)\n",
1764 req->mr->rkey, ret);
1765 nvme_rdma_error_recovery(queue->ctrl);
1766 }
1767 /* the local invalidation completion will end the request */
7a804c34 1768 return;
2f122e4f 1769 }
7a804c34
CH
1770
1771 nvme_rdma_end_request(req);
71102307
CH
1772}
1773
1052b8ac 1774static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
71102307
CH
1775{
1776 struct nvme_rdma_qe *qe =
1777 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
287f329e 1778 struct nvme_rdma_queue *queue = wc->qp->qp_context;
71102307
CH
1779 struct ib_device *ibdev = queue->device->dev;
1780 struct nvme_completion *cqe = qe->data;
1781 const size_t len = sizeof(struct nvme_completion);
71102307
CH
1782
1783 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1784 nvme_rdma_wr_error(cq, wc, "RECV");
1052b8ac 1785 return;
71102307
CH
1786 }
1787
25c1ca6e 1788 /* sanity check the received data length */
1789 if (unlikely(wc->byte_len < len)) {
1790 dev_err(queue->ctrl->ctrl.device,
1791 "Unexpected nvme completion length(%d)\n", wc->byte_len);
1792 nvme_rdma_error_recovery(queue->ctrl);
1793 return;
1794 }
1795
71102307
CH
1796 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1797 /*
1798 * AEN requests are special as they don't time out and can
1799 * survive any kind of queue freeze and often don't respond to
1800 * aborts. We don't even bother to allocate a struct request
1801 * for them but rather special case them here.
1802 */
58a8df67
IR
1803 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
1804 cqe->command_id)))
7bf58533
CH
1805 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1806 &cqe->result);
71102307 1807 else
1052b8ac 1808 nvme_rdma_process_nvme_rsp(queue, cqe, wc);
71102307
CH
1809 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1810
1811 nvme_rdma_post_recv(queue, qe);
71102307
CH
1812}
1813
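/* Pre-post one receive buffer per queue entry once the RDMA connection is up. */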
1814static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
1815{
1816 int ret, i;
1817
1818 for (i = 0; i < queue->queue_size; i++) {
1819 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
1820 if (ret)
9817d763 1821 return ret;
71102307
CH
1822 }
1823
1824 return 0;
71102307
CH
1825}
1826
1827static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
1828 struct rdma_cm_event *ev)
1829{
7f03953c
SW
1830 struct rdma_cm_id *cm_id = queue->cm_id;
1831 int status = ev->status;
1832 const char *rej_msg;
1833 const struct nvme_rdma_cm_rej *rej_data;
1834 u8 rej_data_len;
1835
1836 rej_msg = rdma_reject_msg(cm_id, status);
1837 rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
1838
1839 if (rej_data && rej_data_len >= sizeof(u16)) {
1840 u16 sts = le16_to_cpu(rej_data->sts);
71102307
CH
1841
1842 dev_err(queue->ctrl->ctrl.device,
7f03953c
SW
1843 "Connect rejected: status %d (%s) nvme status %d (%s).\n",
1844 status, rej_msg, sts, nvme_rdma_cm_msg(sts));
71102307
CH
1845 } else {
1846 dev_err(queue->ctrl->ctrl.device,
7f03953c 1847 "Connect rejected: status %d (%s).\n", status, rej_msg);
71102307
CH
1848 }
1849
1850 return -ECONNRESET;
1851}
1852
1853static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
1854{
e63440d6 1855 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
71102307
CH
1856 int ret;
1857
ca6e95bb
SG
1858 ret = nvme_rdma_create_queue_ib(queue);
1859 if (ret)
1860 return ret;
71102307 1861
e63440d6
IR
1862 if (ctrl->opts->tos >= 0)
1863 rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
71102307
CH
1864 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
1865 if (ret) {
e63440d6 1866 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
71102307
CH
 1867 ret);
1868 goto out_destroy_queue;
1869 }
1870
1871 return 0;
1872
1873out_destroy_queue:
1874 nvme_rdma_destroy_queue_ib(queue);
71102307
CH
1875 return ret;
1876}
1877
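/*
 * Route is resolved: fill in the NVMe/RDMA CM request private data
 * (record format, qid, hrqsize/hsqsize) and initiate the connection.
 * The admin queue uses the fixed NVME_AQ_DEPTH sizing; I/O queues use
 * the negotiated queue_size/sqsize.
 */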
1878static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1879{
1880 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1881 struct rdma_conn_param param = { };
0b857b44 1882 struct nvme_rdma_cm_req priv = { };
71102307
CH
1883 int ret;
1884
1885 param.qp_num = queue->qp->qp_num;
1886 param.flow_control = 1;
1887
1888 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
2ac17c28
SG
1889 /* maximum retry count */
1890 param.retry_count = 7;
71102307
CH
1891 param.rnr_retry_count = 7;
1892 param.private_data = &priv;
1893 param.private_data_len = sizeof(priv);
1894
1895 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1896 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
f994d9dc
JF
1897 /*
1898 * set the admin queue depth to the minimum size
1899 * specified by the Fabrics standard.
1900 */
1901 if (priv.qid == 0) {
7aa1f427
SG
1902 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
1903 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
f994d9dc 1904 } else {
c5af8654
JF
 1905 /*
 1906 * The current interpretation of the fabrics spec is
 1907 * that hrqsize must be at least sqsize + 1, i.e. the
 1908 * 1's based representation of sqsize.
 1909 */
f994d9dc 1910 priv.hrqsize = cpu_to_le16(queue->queue_size);
c5af8654 1911 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
f994d9dc 1912 }
71102307 1913
071ba4cc 1914 ret = rdma_connect_locked(queue->cm_id, &param);
71102307
CH
1915 if (ret) {
1916 dev_err(ctrl->ctrl.device,
071ba4cc 1917 "rdma_connect_locked failed (%d).\n", ret);
9817d763 1918 return ret;
71102307
CH
1919 }
1920
1921 return 0;
71102307
CH
1922}
1923
71102307
CH
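/*
 * RDMA CM event handler: drives queue setup through the
 * addr-resolved -> route-resolved -> established sequence and triggers
 * error recovery on disconnects and fatal CM errors.
 */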
1924static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1925 struct rdma_cm_event *ev)
1926{
1927 struct nvme_rdma_queue *queue = cm_id->context;
1928 int cm_error = 0;
1929
1930 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1931 rdma_event_msg(ev->event), ev->event,
1932 ev->status, cm_id);
1933
1934 switch (ev->event) {
1935 case RDMA_CM_EVENT_ADDR_RESOLVED:
1936 cm_error = nvme_rdma_addr_resolved(queue);
1937 break;
1938 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1939 cm_error = nvme_rdma_route_resolved(queue);
1940 break;
1941 case RDMA_CM_EVENT_ESTABLISHED:
1942 queue->cm_error = nvme_rdma_conn_established(queue);
1943 /* complete cm_done regardless of success/failure */
1944 complete(&queue->cm_done);
1945 return 0;
1946 case RDMA_CM_EVENT_REJECTED:
1947 cm_error = nvme_rdma_conn_rejected(queue, ev);
1948 break;
71102307
CH
1949 case RDMA_CM_EVENT_ROUTE_ERROR:
1950 case RDMA_CM_EVENT_CONNECT_ERROR:
1951 case RDMA_CM_EVENT_UNREACHABLE:
abf87d5e 1952 case RDMA_CM_EVENT_ADDR_ERROR:
71102307
CH
1953 dev_dbg(queue->ctrl->ctrl.device,
1954 "CM error event %d\n", ev->event);
1955 cm_error = -ECONNRESET;
1956 break;
1957 case RDMA_CM_EVENT_DISCONNECTED:
1958 case RDMA_CM_EVENT_ADDR_CHANGE:
1959 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1960 dev_dbg(queue->ctrl->ctrl.device,
1961 "disconnect received - connection closed\n");
1962 nvme_rdma_error_recovery(queue->ctrl);
1963 break;
1964 case RDMA_CM_EVENT_DEVICE_REMOVAL:
e87a911f
SW
1965 /* device removal is handled via the ib_client API */
1966 break;
71102307
CH
1967 default:
1968 dev_err(queue->ctrl->ctrl.device,
1969 "Unexpected RDMA CM event (%d)\n", ev->event);
1970 nvme_rdma_error_recovery(queue->ctrl);
1971 break;
1972 }
1973
1974 if (cm_error) {
1975 queue->cm_error = cm_error;
1976 complete(&queue->cm_done);
1977 }
1978
1979 return 0;
1980}
1981
0475a8dc
SG
1982static void nvme_rdma_complete_timed_out(struct request *rq)
1983{
1984 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1985 struct nvme_rdma_queue *queue = req->queue;
0475a8dc 1986
0475a8dc 1987 nvme_rdma_stop_queue(queue);
fdf58e02 1988 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
0475a8dc
SG
1989 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
1990 blk_mq_complete_request(rq);
1991 }
0475a8dc
SG
1992}
1993
71102307
CH
1994static enum blk_eh_timer_return
1995nvme_rdma_timeout(struct request *rq, bool reserved)
1996{
1997 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
4c174e63
SG
1998 struct nvme_rdma_queue *queue = req->queue;
1999 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
71102307 2000
4c174e63
SG
2001 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
2002 rq->tag, nvme_rdma_queue_idx(queue));
e62a538d 2003
4c174e63
SG
2004 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2005 /*
0475a8dc
SG
 2006 * If we are resetting, connecting or deleting we should
 2007 * complete immediately because we may block the controller
 2008 * teardown or setup sequence:
2009 * - ctrl disable/shutdown fabrics requests
2010 * - connect requests
2011 * - initialization admin requests
2012 * - I/O requests that entered after unquiescing and
2013 * the controller stopped responding
2014 *
2015 * All other requests should be cancelled by the error
2016 * recovery work, so it's fine that we fail it here.
4c174e63 2017 */
0475a8dc 2018 nvme_rdma_complete_timed_out(rq);
4c174e63
SG
2019 return BLK_EH_DONE;
2020 }
71102307 2021
0475a8dc
SG
2022 /*
2023 * LIVE state should trigger the normal error recovery which will
2024 * handle completing this request.
2025 */
4c174e63 2026 nvme_rdma_error_recovery(ctrl);
4c174e63 2027 return BLK_EH_RESET_TIMER;
71102307
CH
2028}
2029
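/*
 * .queue_rq: DMA-map the command capsule, build the NVMe command, map
 * the request data (using a signature MR when PI is enabled for the
 * namespace), then post the SEND, chaining the MR registration WR in
 * front of it when one is needed.
 */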
fc17b653 2030static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
71102307
CH
2031 const struct blk_mq_queue_data *bd)
2032{
2033 struct nvme_ns *ns = hctx->queue->queuedata;
2034 struct nvme_rdma_queue *queue = hctx->driver_data;
2035 struct request *rq = bd->rq;
2036 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2037 struct nvme_rdma_qe *sqe = &req->sqe;
f4b9e6c9 2038 struct nvme_command *c = nvme_req(rq)->cmd;
71102307 2039 struct ib_device *dev;
3bc32bb1 2040 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
fc17b653
CH
2041 blk_status_t ret;
2042 int err;
71102307
CH
2043
2044 WARN_ON_ONCE(rq->tag < 0);
2045
a9715744
TC
2046 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2047 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
553cd9ef 2048
71102307 2049 dev = queue->device->dev;
62f99b62
MG
2050
2051 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
2052 sizeof(struct nvme_command),
2053 DMA_TO_DEVICE);
2054 err = ib_dma_mapping_error(dev, req->sqe.dma);
2055 if (unlikely(err))
2056 return BLK_STS_RESOURCE;
2057
71102307
CH
2058 ib_dma_sync_single_for_cpu(dev, sqe->dma,
2059 sizeof(struct nvme_command), DMA_TO_DEVICE);
2060
f4b9e6c9 2061 ret = nvme_setup_cmd(ns, rq);
fc17b653 2062 if (ret)
62f99b62 2063 goto unmap_qe;
71102307 2064
71102307
CH
2065 blk_mq_start_request(rq);
2066
5ec5d3bd
MG
2067 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
2068 queue->pi_support &&
2069 (c->common.opcode == nvme_cmd_write ||
2070 c->common.opcode == nvme_cmd_read) &&
2071 nvme_ns_has_pi(ns))
2072 req->use_sig_mr = true;
2073 else
2074 req->use_sig_mr = false;
2075
fc17b653 2076 err = nvme_rdma_map_data(queue, rq, c);
a7b7c7a1 2077 if (unlikely(err < 0)) {
71102307 2078 dev_err(queue->ctrl->ctrl.device,
fc17b653 2079 "Failed to map data (%d)\n", err);
71102307
CH
2080 goto err;
2081 }
2082
b4b591c8
SG
2083 sqe->cqe.done = nvme_rdma_send_done;
2084
71102307
CH
2085 ib_dma_sync_single_for_device(dev, sqe->dma,
2086 sizeof(struct nvme_command), DMA_TO_DEVICE);
2087
fc17b653 2088 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
f41725bb 2089 req->mr ? &req->reg_wr.wr : NULL);
16686f3a
MG
2090 if (unlikely(err))
2091 goto err_unmap;
71102307 2092
fc17b653 2093 return BLK_STS_OK;
62f99b62 2094
16686f3a
MG
2095err_unmap:
2096 nvme_rdma_unmap_data(queue, rq);
71102307 2097err:
62eca397
CL
2098 if (err == -EIO)
2099 ret = nvme_host_path_error(rq);
2100 else if (err == -ENOMEM || err == -EAGAIN)
62f99b62
MG
2101 ret = BLK_STS_RESOURCE;
2102 else
2103 ret = BLK_STS_IOERR;
16686f3a 2104 nvme_cleanup_cmd(rq);
62f99b62
MG
2105unmap_qe:
2106 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
2107 DMA_TO_DEVICE);
2108 return ret;
71102307
CH
2109}
2110
ff8519f9
SG
2111static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
2112{
2113 struct nvme_rdma_queue *queue = hctx->driver_data;
2114
2115 return ib_process_cq_direct(queue->ib_cq, -1);
2116}
2117
5ec5d3bd
MG
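/*
 * After a PI (T10-DIF) protected request completes, read back the
 * signature MR status and map any guard/reftag/apptag mismatch to the
 * corresponding NVMe protection-information status code.
 */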
2118static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
2119{
2120 struct request *rq = blk_mq_rq_from_pdu(req);
2121 struct ib_mr_status mr_status;
2122 int ret;
2123
2124 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
2125 if (ret) {
2126 pr_err("ib_check_mr_status failed, ret %d\n", ret);
2127 nvme_req(rq)->status = NVME_SC_INVALID_PI;
2128 return;
2129 }
2130
2131 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
2132 switch (mr_status.sig_err.err_type) {
2133 case IB_SIG_BAD_GUARD:
2134 nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
2135 break;
2136 case IB_SIG_BAD_REFTAG:
2137 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
2138 break;
2139 case IB_SIG_BAD_APPTAG:
2140 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
2141 break;
2142 }
2143 pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
2144 mr_status.sig_err.err_type, mr_status.sig_err.expected,
2145 mr_status.sig_err.actual);
2146 }
2147}
2148
71102307
CH
2149static void nvme_rdma_complete_rq(struct request *rq)
2150{
2151 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
62f99b62
MG
2152 struct nvme_rdma_queue *queue = req->queue;
2153 struct ib_device *ibdev = queue->device->dev;
71102307 2154
5ec5d3bd
MG
2155 if (req->use_sig_mr)
2156 nvme_rdma_check_pi_status(req);
2157
62f99b62
MG
2158 nvme_rdma_unmap_data(queue, rq);
2159 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
2160 DMA_TO_DEVICE);
77f02a7a 2161 nvme_complete_rq(rq);
71102307
CH
2162}
2163
0b36658c
SG
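/*
 * Map blk-mq hardware contexts onto the RDMA device: either separate
 * default(write)/read queue ranges or a shared range, with optional
 * poll queues appended after both.
 */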
2164static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
2165{
2166 struct nvme_rdma_ctrl *ctrl = set->driver_data;
5651cd3c 2167 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
0b36658c 2168
5651cd3c 2169 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
b65bb777 2170 /* separate read/write queues */
5651cd3c
SG
2171 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2172 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2173 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2174 set->map[HCTX_TYPE_READ].nr_queues =
2175 ctrl->io_queues[HCTX_TYPE_READ];
b65bb777 2176 set->map[HCTX_TYPE_READ].queue_offset =
5651cd3c 2177 ctrl->io_queues[HCTX_TYPE_DEFAULT];
b65bb777 2178 } else {
5651cd3c
SG
2179 /* shared read/write queues */
2180 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2181 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2182 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2183 set->map[HCTX_TYPE_READ].nr_queues =
2184 ctrl->io_queues[HCTX_TYPE_DEFAULT];
b65bb777
SG
2185 set->map[HCTX_TYPE_READ].queue_offset = 0;
2186 }
2187 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
2188 ctrl->device->dev, 0);
2189 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
2190 ctrl->device->dev, 0);
ff8519f9 2191
5651cd3c
SG
2192 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2193 /* map dedicated poll queues only if we have queues left */
ff8519f9 2194 set->map[HCTX_TYPE_POLL].nr_queues =
b1064d3e 2195 ctrl->io_queues[HCTX_TYPE_POLL];
ff8519f9 2196 set->map[HCTX_TYPE_POLL].queue_offset =
5651cd3c
SG
2197 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2198 ctrl->io_queues[HCTX_TYPE_READ];
ff8519f9
SG
2199 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2200 }
5651cd3c
SG
2201
2202 dev_info(ctrl->ctrl.device,
2203 "mapped %d/%d/%d default/read/poll queues.\n",
2204 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2205 ctrl->io_queues[HCTX_TYPE_READ],
2206 ctrl->io_queues[HCTX_TYPE_POLL]);
2207
b65bb777 2208 return 0;
0b36658c
SG
2209}
2210
f363b089 2211static const struct blk_mq_ops nvme_rdma_mq_ops = {
71102307
CH
2212 .queue_rq = nvme_rdma_queue_rq,
2213 .complete = nvme_rdma_complete_rq,
71102307
CH
2214 .init_request = nvme_rdma_init_request,
2215 .exit_request = nvme_rdma_exit_request,
71102307 2216 .init_hctx = nvme_rdma_init_hctx,
71102307 2217 .timeout = nvme_rdma_timeout,
0b36658c 2218 .map_queues = nvme_rdma_map_queues,
ff8519f9 2219 .poll = nvme_rdma_poll,
71102307
CH
2220};
2221
f363b089 2222static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
71102307
CH
2223 .queue_rq = nvme_rdma_queue_rq,
2224 .complete = nvme_rdma_complete_rq,
385475ee
CH
2225 .init_request = nvme_rdma_init_request,
2226 .exit_request = nvme_rdma_exit_request,
71102307
CH
2227 .init_hctx = nvme_rdma_init_admin_hctx,
2228 .timeout = nvme_rdma_timeout,
2229};
2230
18398af2 2231static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
71102307 2232{
794a4cb3
SG
2233 cancel_work_sync(&ctrl->err_work);
2234 cancel_delayed_work_sync(&ctrl->reconnect_work);
2235
75862c72 2236 nvme_rdma_teardown_io_queues(ctrl, shutdown);
e7832cb4 2237 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
18398af2 2238 if (shutdown)
71102307 2239 nvme_shutdown_ctrl(&ctrl->ctrl);
18398af2 2240 else
b5b05048 2241 nvme_disable_ctrl(&ctrl->ctrl);
75862c72 2242 nvme_rdma_teardown_admin_queue(ctrl, shutdown);
71102307
CH
2243}
2244
c5017e85 2245static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
2461a8dd 2246{
e9bc2587 2247 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
71102307
CH
2248}
2249
71102307
CH
2250static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
2251{
d86c4d8e
CH
2252 struct nvme_rdma_ctrl *ctrl =
2253 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
71102307 2254
d09f2b45 2255 nvme_stop_ctrl(&ctrl->ctrl);
18398af2 2256 nvme_rdma_shutdown_ctrl(ctrl, false);
71102307 2257
ad6a0a52 2258 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
d5bf4b7f
SG
2259 /* state change failure should never happen */
2260 WARN_ON_ONCE(1);
2261 return;
2262 }
2263
c66e2998 2264 if (nvme_rdma_setup_ctrl(ctrl, false))
370ae6e4 2265 goto out_fail;
71102307 2266
71102307
CH
2267 return;
2268
370ae6e4 2269out_fail:
8000d1fd
NC
2270 ++ctrl->ctrl.nr_reconnects;
2271 nvme_rdma_reconnect_or_remove(ctrl);
71102307
CH
2272}
2273
71102307
CH
2274static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
2275 .name = "rdma",
2276 .module = THIS_MODULE,
5ec5d3bd 2277 .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
71102307
CH
2278 .reg_read32 = nvmf_reg_read32,
2279 .reg_read64 = nvmf_reg_read64,
2280 .reg_write32 = nvmf_reg_write32,
71102307
CH
2281 .free_ctrl = nvme_rdma_free_ctrl,
2282 .submit_async_event = nvme_rdma_submit_async_event,
c5017e85 2283 .delete_ctrl = nvme_rdma_delete_ctrl,
71102307
CH
2284 .get_address = nvmf_get_address,
2285};
2286
36e835f2
JS
2287/*
2288 * Fails a connection request if it matches an existing controller
2289 * (association) with the same tuple:
2290 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
2291 *
 2292 * If the local address is not specified in the request, it will match
 2293 * an existing controller that has all the other parameters the same
 2294 * and likewise has no local port address specified.
2295 *
2296 * The ports don't need to be compared as they are intrinsically
2297 * already matched by the port pointers supplied.
2298 */
2299static bool
2300nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
2301{
2302 struct nvme_rdma_ctrl *ctrl;
2303 bool found = false;
2304
2305 mutex_lock(&nvme_rdma_ctrl_mutex);
2306 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
b7c7be6f 2307 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
36e835f2
JS
2308 if (found)
2309 break;
2310 }
2311 mutex_unlock(&nvme_rdma_ctrl_mutex);
2312
2313 return found;
2314}
2315
71102307
CH
2316static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2317 struct nvmf_ctrl_options *opts)
2318{
2319 struct nvme_rdma_ctrl *ctrl;
2320 int ret;
2321 bool changed;
2322
2323 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2324 if (!ctrl)
2325 return ERR_PTR(-ENOMEM);
2326 ctrl->ctrl.opts = opts;
2327 INIT_LIST_HEAD(&ctrl->list);
2328
bb59b8e5
SG
2329 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2330 opts->trsvcid =
2331 kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
2332 if (!opts->trsvcid) {
2333 ret = -ENOMEM;
2334 goto out_free_ctrl;
2335 }
2336 opts->mask |= NVMF_OPT_TRSVCID;
2337 }
0928f9b4
SG
2338
2339 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
bb59b8e5 2340 opts->traddr, opts->trsvcid, &ctrl->addr);
71102307 2341 if (ret) {
bb59b8e5
SG
2342 pr_err("malformed address passed: %s:%s\n",
2343 opts->traddr, opts->trsvcid);
71102307
CH
2344 goto out_free_ctrl;
2345 }
2346
8f4e8dac 2347 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
0928f9b4
SG
2348 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2349 opts->host_traddr, NULL, &ctrl->src_addr);
8f4e8dac 2350 if (ret) {
0928f9b4 2351 pr_err("malformed src address passed: %s\n",
8f4e8dac
MG
2352 opts->host_traddr);
2353 goto out_free_ctrl;
2354 }
2355 }
2356
36e835f2
JS
2357 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
2358 ret = -EALREADY;
2359 goto out_free_ctrl;
2360 }
2361
71102307
CH
2362 INIT_DELAYED_WORK(&ctrl->reconnect_work,
2363 nvme_rdma_reconnect_ctrl_work);
2364 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
d86c4d8e 2365 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
71102307 2366
ff8519f9
SG
2367 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2368 opts->nr_poll_queues + 1;
c5af8654 2369 ctrl->ctrl.sqsize = opts->queue_size - 1;
71102307
CH
2370 ctrl->ctrl.kato = opts->kato;
2371
2372 ret = -ENOMEM;
d858e5f0 2373 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
71102307
CH
2374 GFP_KERNEL);
2375 if (!ctrl->queues)
3d064101
SG
2376 goto out_free_ctrl;
2377
2378 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
2379 0 /* no quirks, we're perfect! */);
2380 if (ret)
2381 goto out_kfree_queues;
71102307 2382
b754a32c
MG
2383 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
2384 WARN_ON_ONCE(!changed);
2385
c66e2998 2386 ret = nvme_rdma_setup_ctrl(ctrl, true);
71102307 2387 if (ret)
3d064101 2388 goto out_uninit_ctrl;
71102307 2389
0928f9b4 2390 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
71102307
CH
2391 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2392
71102307
CH
2393 mutex_lock(&nvme_rdma_ctrl_mutex);
2394 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
2395 mutex_unlock(&nvme_rdma_ctrl_mutex);
2396
71102307
CH
2397 return &ctrl->ctrl;
2398
71102307
CH
2399out_uninit_ctrl:
2400 nvme_uninit_ctrl(&ctrl->ctrl);
2401 nvme_put_ctrl(&ctrl->ctrl);
2402 if (ret > 0)
2403 ret = -EIO;
2404 return ERR_PTR(ret);
3d064101
SG
2405out_kfree_queues:
2406 kfree(ctrl->queues);
71102307
CH
2407out_free_ctrl:
2408 kfree(ctrl);
2409 return ERR_PTR(ret);
2410}
2411
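/*
 * Registering this transport makes "-t rdma" available to the fabrics
 * code, e.g. (illustrative addresses, default NVMe/RDMA port 4420):
 *   nvme connect -t rdma -a 192.168.1.10 -s 4420 -n <subsysnqn>
 * Only traddr is required; trsvcid, host_traddr, queue counts and TOS
 * are optional.
 */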
2412static struct nvmf_transport_ops nvme_rdma_transport = {
2413 .name = "rdma",
0de5cd36 2414 .module = THIS_MODULE,
71102307 2415 .required_opts = NVMF_OPT_TRADDR,
8f4e8dac 2416 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
b65bb777 2417 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
e63440d6
IR
2418 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2419 NVMF_OPT_TOS,
71102307
CH
2420 .create_ctrl = nvme_rdma_create_ctrl,
2421};
2422
e87a911f
SW
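/*
 * ib_client ->remove callback: when an RDMA device disappears, delete
 * every controller bound to it and wait for the deletions to complete.
 */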
2423static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2424{
2425 struct nvme_rdma_ctrl *ctrl;
9bad0404
MG
2426 struct nvme_rdma_device *ndev;
2427 bool found = false;
2428
2429 mutex_lock(&device_list_mutex);
2430 list_for_each_entry(ndev, &device_list, entry) {
2431 if (ndev->dev == ib_device) {
2432 found = true;
2433 break;
2434 }
2435 }
2436 mutex_unlock(&device_list_mutex);
2437
2438 if (!found)
2439 return;
e87a911f
SW
2440
2441 /* Delete all controllers using this device */
2442 mutex_lock(&nvme_rdma_ctrl_mutex);
2443 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2444 if (ctrl->device->dev != ib_device)
2445 continue;
c5017e85 2446 nvme_delete_ctrl(&ctrl->ctrl);
e87a911f
SW
2447 }
2448 mutex_unlock(&nvme_rdma_ctrl_mutex);
2449
b227c59b 2450 flush_workqueue(nvme_delete_wq);
e87a911f
SW
2451}
2452
2453static struct ib_client nvme_rdma_ib_client = {
2454 .name = "nvme_rdma",
e87a911f
SW
2455 .remove = nvme_rdma_remove_one
2456};
2457
71102307
CH
2458static int __init nvme_rdma_init_module(void)
2459{
e87a911f
SW
2460 int ret;
2461
e87a911f 2462 ret = ib_register_client(&nvme_rdma_ib_client);
a56c79cf 2463 if (ret)
9a6327d2 2464 return ret;
a56c79cf
SG
2465
2466 ret = nvmf_register_transport(&nvme_rdma_transport);
2467 if (ret)
2468 goto err_unreg_client;
e87a911f 2469
a56c79cf 2470 return 0;
e87a911f 2471
a56c79cf
SG
2472err_unreg_client:
2473 ib_unregister_client(&nvme_rdma_ib_client);
a56c79cf 2474 return ret;
71102307
CH
2475}
2476
2477static void __exit nvme_rdma_cleanup_module(void)
2478{
9ad9e8d6
MG
2479 struct nvme_rdma_ctrl *ctrl;
2480
71102307 2481 nvmf_unregister_transport(&nvme_rdma_transport);
e87a911f 2482 ib_unregister_client(&nvme_rdma_ib_client);
9ad9e8d6
MG
2483
2484 mutex_lock(&nvme_rdma_ctrl_mutex);
2485 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2486 nvme_delete_ctrl(&ctrl->ctrl);
2487 mutex_unlock(&nvme_rdma_ctrl_mutex);
2488 flush_workqueue(nvme_delete_wq);
71102307
CH
2489}
2490
2491module_init(nvme_rdma_init_module);
2492module_exit(nvme_rdma_cleanup_module);
2493
2494MODULE_LICENSE("GPL v2");