/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN * ISER_MAX_RX_MISC_PDUS)
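/*
 * CQ sizing rationale: each connection may keep up to ISER_QP_MAX_RECV_DTOS
 * receive and ISER_QP_MAX_REQ_DTOS send work requests in flight, so a CQ
 * shared by up to ISCSI_ISER_MAX_CONN connections must be able to absorb
 * all of their completions at once, plus (assuming the tail term above is
 * the per-connection allowance) room for miscellaneous control PDUs.
 */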
static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("cq event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}
static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}
static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
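/*
 * One completion context (CQ + tasklet, see struct iser_comp) is created
 * per completion vector, capped at the number of online CPUs;
 * iser_create_ib_conn_res() later spreads QPs across these contexts by
 * picking the least loaded one.
 */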
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	ret = iser_assign_reg_ops(device);
	if (ret)
		return ret;

	device->comps_used = min_t(int, num_online_cpus(),
				   device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	if (!iser_always_reg) {
		int access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_WRITE |
			     IB_ACCESS_REMOTE_READ;

		device->mr = ib_get_dma_mr(device->pd, access);
		if (IS_ERR(device->mr))
			goto dma_mr_err;
	}

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	if (device->mr)
		ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	if (device->mr)
		(void)ib_dereg_mr(device->mr);
	ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;
}
/**
 * iser_alloc_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
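/*
 * The FMR flavor keeps a single iser_fr_desc that owns the ib_fmr_pool and
 * its page vector; it is linked on fr_pool->list so the FMR and
 * fast-registration flavors can share the same pool bookkeeping.
 */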
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_page_vec *page_vec;
	struct iser_fr_desc *desc;
	struct ib_fmr_pool *fmr_pool;
	struct ib_fmr_pool_param params;
	int ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
			   GFP_KERNEL);
	if (!page_vec) {
		ret = -ENOMEM;
		goto err_frpl;
	}

	page_vec->pages = (u64 *)(page_vec + 1);

	params.page_shift = SHIFT_4K;
	params.max_pages_per_fmr = size;
	/* make the pool size twice the max number of SCSI commands *
	 * the midlayer is expected to queue, watermark for unmap at 50% */
	params.pool_size = cmds_max * 2;
	params.dirty_watermark = cmds_max;
	params.cache = 0;
	params.flush_function = NULL;
	params.access = (IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_READ);

	fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(fmr_pool)) {
		ret = PTR_ERR(fmr_pool);
		iser_err("FMR allocation failed, err %d\n", ret);
		goto err_fmr;
	}

	desc->rsc.page_vec = page_vec;
	desc->rsc.fmr_pool = fmr_pool;
	list_add(&desc->list, &fr_pool->list);

	return 0;

err_fmr:
	kfree(page_vec);
err_frpl:
	kfree(desc);

	return ret;
}
/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;

	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);

	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, desc->rsc.fmr_pool);

	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
	kfree(desc->rsc.page_vec);
	kfree(desc);
}
static int
iser_alloc_reg_res(struct ib_device *ib_device,
		   struct ib_pd *pd,
		   struct iser_reg_resources *res,
		   unsigned int size)
{
	int ret;

	res->frpl = ib_alloc_fast_reg_page_list(ib_device, size);
	if (IS_ERR(res->frpl)) {
		ret = PTR_ERR(res->frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
	if (IS_ERR(res->mr)) {
		ret = PTR_ERR(res->mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	res->mr_valid = 1;

	return 0;

fast_reg_mr_failure:
	ib_free_fast_reg_page_list(res->frpl);

	return ret;
}
static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
	ib_dereg_mr(rsc->mr);
	ib_free_fast_reg_page_list(rsc->frpl);
}
static int
iser_alloc_pi_ctx(struct ib_device *ib_device,
		  struct ib_pd *pd,
		  struct iser_fr_desc *desc,
		  unsigned int size)
{
	struct iser_pi_context *pi_ctx = NULL;
	int ret;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		goto alloc_reg_res_err;
	}

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	pi_ctx->sig_mr_valid = 1;
	desc->pi_ctx->sig_protected = 0;

	return 0;

sig_mr_failure:
	iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
	kfree(desc->pi_ctx);

	return ret;
}
static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	iser_free_reg_res(&pi_ctx->rsc);
	ib_dereg_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}
static struct iser_fr_desc *
iser_create_fastreg_desc(struct ib_device *ib_device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
	if (ret)
		goto reg_res_alloc_failure;

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return desc;

pi_ctx_alloc_failure:
	iser_free_reg_res(&desc->rsc);
reg_res_alloc_failure:
	kfree(desc);

	return ERR_PTR(ret);
}
/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
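/*
 * Unlike the FMR flavor, one descriptor (fast-registration MR, page list
 * and, when T10-PI is enabled, a protection context) is allocated per
 * outstanding command, since a fast-reg MR carries a single registration
 * at a time.
 */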
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device->ib_device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}

		list_add_tail(&desc->list, &fr_pool->list);
		fr_pool->size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}
/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&fr_pool->list))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
		list_del(&desc->list);
		iser_free_reg_res(&desc->rsc);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}
/**
 * iser_create_ib_conn_res - creates a Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
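/*
 * Send queue sizing note: ISER_QP_MAX_REQ_DTOS + 1 (or the signature
 * variant) entries are requested when the device allows it. The extra
 * entry presumably leaves headroom for control work requests such as the
 * beacon posted by iser_conn_terminate().
 */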
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device *device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->comp->cq;
	init_attr.recv_cq = ib_conn->comp->cq;
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}
/**
 * Based on the resolved device node GUID, see if there is an already
 * allocated device for this device. If there's no such device, create one.
 */
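/*
 * Devices are keyed by node GUID, so CM IDs resolved to different ports of
 * the same HCA share one iser_device (and with it the PD and completion
 * contexts); refcount tracks how many connections currently use it.
 */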
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the newly allocated iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}
/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}
/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}
/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}
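/*
 * Note on the beacon: once rdma_disconnect() has moved the QP toward the
 * error state, every posted work request completes with a flush error. The
 * beacon is a plain SEND carrying the reserved ISER_BEACON_WRID, so its
 * flush completion (consumed in iser_handle_wc()) indicates that all
 * earlier flush errors have been polled before teardown proceeds.
 */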
/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;

	iser_conn->state = ISER_CONN_TERMINATING;
}
static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
		      unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	unsigned short sg_tablesize, sup_sg_tablesize;

	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
				 device->dev_attr.max_fast_reg_page_list_len);

	if (sg_tablesize > sup_sg_tablesize) {
		sg_tablesize = sup_sg_tablesize;
		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
	} else {
		iser_conn->scsi_max_sectors = max_sectors;
	}

	iser_conn->scsi_sg_tablesize = sg_tablesize;

	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
		 iser_conn, iser_conn->scsi_sg_tablesize,
		 iser_conn->scsi_max_sectors);
}
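/*
 * Worked example: with max_sectors = 1024 (a 512 KB I/O),
 * sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128 pages. If the device
 * or ISCSI_ISER_MAX_SG_TABLESIZE supports fewer pages, both the table size
 * and scsi_max_sectors are scaled down accordingly.
 */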
/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	iser_calc_scsi_params(iser_conn, iser_max_sectors);

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}
/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			 ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n",
		  attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}
static int iser_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);
}
/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge sge;
	int ib_ret;

	sge.addr = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = ib_conn->device->pd->local_dma_lkey;

	rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else {
		iser_conn->rx_desc_head = my_rx_head;
	}
	return ib_ret;
}
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
	int ib_ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	wr->next = NULL;
	wr->wr_id = (uintptr_t)tx_desc;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0], &bad_wr);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ib_ret, bad_wr->opcode);

	return ib_ret;
}
/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}
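/*
 * This works because rx_descs is a single contiguous array: a plain
 * address-range check classifies the wr_id without trusting the completion
 * opcode, which is undefined for flush errors.
 */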
/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn: connection RDMA resources
 * @wc: work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	void *wr_id = (void *)(uintptr_t)wc->wr_id;
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (wc->wr_id == ISER_FASTREG_LI_WRID)
		return;

	if (is_iser_tx_desc(iser_conn, wr_id)) {
		struct iser_tx_desc *desc = wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}
/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("%s (%d): wr id %llx vend_err %x\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id, wc->vendor_err);
		else
			iser_dbg("%s (%d): wr id %llx\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id);

		if (wc->wr_id == ISER_BEACON_WRID)
			/* all flush errors were consumed */
			complete(&ib_conn->flush_comp);
		else
			iser_handle_comp_error(ib_conn, wc);
	}
}
/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}
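/*
 * The polling budget (iser_cq_poll_limit = 512) bounds how long one
 * tasklet run can occupy the CPU. After the budget is spent the CQ is
 * re-armed with IB_CQ_NEXT_COMP, relying on the assumption documented
 * above that a further completion will arrive and drive another polling
 * cycle for anything left in the queue.
 */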
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->pi_ctx->sig_protected) {
		desc->pi_ctx->sig_protected = 0;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}
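/*
 * The non-zero return values above appear to follow the T10-PI ASCQ codes
 * for ASC 0x10: 0x1 LOGICAL BLOCK GUARD CHECK FAILED, 0x2 LOGICAL BLOCK
 * APPLICATION TAG CHECK FAILED, 0x3 LOGICAL BLOCK REFERENCE TAG CHECK
 * FAILED, which the SCSI layer can report upward as sense data.
 */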