/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

struct nvmet_rdma_cmd {
	struct ib_sge sge[2];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg;
	struct page *inline_page;
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

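/*
 * Grab a response from the queue's pre-allocated free list. Under memory
 * pressure the list may be empty; in that case fall back to a dynamic
 * allocation and mark it so nvmet_rdma_put_rsp() frees it instead of
 * returning it to the list.
 */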
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl || !nents)
		return;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));
	kfree(sgl);
}

static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
out:
	return NVME_SC_INTERNAL;
}

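/*
 * Set up a single RECV command buffer: sge[0] always covers the NVMe
 * command capsule, and for non-admin (I/O) queues sge[1] additionally
 * covers the inline data page.
 */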
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

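/*
 * Re-run commands that were deferred because the send queue had no free
 * work-request slots; stop at the first one that still cannot be posted.
 */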
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}


static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an admin
		 * connect error; just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

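/*
 * Post the response: chain any RDMA WRITE work requests for Data-Out ahead
 * of the SEND carrying the completion, and repost the RECV buffer first.
 */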
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

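/*
 * Map a remote keyed SGL: allocate a local scatterlist for the transfer and
 * initialize an rdma_rw context against the host's address/rkey so the data
 * can be moved with RDMA READ/WRITE.
 */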
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;
	u16 status;

	/* no data command? */
	if (!len)
		return 0;

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);
	if (status)
		return status;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->req.transfer_len += len;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

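/*
 * Reserve send queue slots (one SEND plus n_rdma READ/WRITE WRs) before
 * starting execution; return false so the caller defers the command if the
 * send queue is currently full.
 */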
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret)
		goto release_queue;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

release_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered
		 * an ib_client to handle queue removal,
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. Use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->priv, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_storage addr = { };
	__kernel_sa_family_t af;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
			port->disc_addr.trsvcid, &addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			port->disc_addr.traddr, port->disc_addr.trsvcid);
		return ret;
	}

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/*
	 * IB Device that is used by nvmet controllers is being removed,
	 * delete all queues using this device.
	 */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	struct nvmet_rdma_queue *queue;

	nvmet_unregister_transport(&nvmet_rdma_ops);

	flush_scheduled_work();

	mutex_lock(&nvmet_rdma_queue_mutex);
	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
			struct nvmet_rdma_queue, queue_list))) {
		list_del_init(&queue->queue_list);

		mutex_unlock(&nvmet_rdma_queue_mutex);
		__nvmet_rdma_queue_disconnect(queue);
		mutex_lock(&nvmet_rdma_queue_mutex);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
	ib_unregister_client(&nvmet_rdma_ib_client);
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */