/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)

static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
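
/*
 * Pick the least-loaded completion context for a new connection:
 * scan device->comps[] under device_list_mutex and take the entry
 * with the fewest active QPs.
 */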
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}

static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}
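
/*
 * Build the RC QP for a connection. Send and receive work requests
 * share the completion context's single CQ, and signature offload is
 * requested at create time when the device is PI-capable.
 */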
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("event: %d\n", e->event);
}
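
/*
 * Allocate the receive descriptor ring and DMA-map each descriptor
 * for DMA_FROM_DEVICE; on a mapping failure every descriptor mapped
 * so far is unwound before the ring is freed.
 */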
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		goto fail;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	isert_conn->rx_desc_head = 0;

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}

static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);
}
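
/*
 * One completion context is created per completion vector, bounded by
 * ISERT_MAX_CQ and the number of online CPUs; each CQ's depth is
 * capped by the device's reported max_cqe.
 */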
static int
isert_alloc_comps(struct isert_device *device,
		  struct ib_device_attr *attr)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors support "
		   "Fast registration %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}
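
/*
 * Select the memory registration strategy once per device: fast
 * registration plus signature handover when the HCA advertises both
 * capabilities, otherwise plain DMA mapping.
 */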
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device_attr *dev_attr;
	int ret;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(device->ib_device, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	ret = isert_alloc_comps(device, dev_attr);
	if (ret)
		return ret;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_cq:
	isert_free_comps(device);
	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}

static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
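
/*
 * Devices are cached on device_list keyed by node_guid, so multiple
 * connections arriving on the same HCA share one isert_device along
 * with its PD and completion contexts.
 */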
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			isert_conn->fr_pool_size - i);
}
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		isert_err("Failed to allocate prot frpl err=%ld\n",
			  PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
	}

	pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				      ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(pi_ctx);

	return ret;
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		isert_err("Failed to allocate data frpl err=%ld\n",
			  PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				       ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;

err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}
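
/*
 * Size the per-connection fastreg descriptor pool from the session's
 * tag count, (2 * tag_num) + ISCSIT_EXTRA_TAGS descriptors, so every
 * outstanding command can own an RDMA registration.
 */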
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		  isert_conn, isert_conn->fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->accept_node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);
}

static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
		ISCSI_DEF_MAX_RECV_SEG_LEN;

	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	return 0;

out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
	return ret;
}
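
/*
 * CM CONNECT_REQUEST handler: allocate the connection and its login
 * buffers, resolve the shared device, set up the QP, post the first
 * login receive, and only then accept. A second kref is taken before
 * rdma_accept() so an initiator REJECT racing in cannot drop the last
 * reference while the np accept list still points at the connection.
 */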
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;
	/*
	 * Obtain the second reference now before isert_rdma_accept() to
	 * ensure that any initiator generated REJECT CM event that occurs
	 * asynchronously won't drop the last reference until the error path
	 * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
	 * isert_free_conn() -> isert_put_conn() -> kref_put().
	 */
	if (!kref_get_unless_zero(&isert_conn->kref)) {
		isert_warn("conn %p connect_release is running\n", isert_conn);
		goto out_conn_dev;
	}

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	isert_info("np %p: Allow accept_np to continue\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->mutex);
}

static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	bool terminating = false;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_info("conn %p completing wait\n", isert_conn);
	complete(&isert_conn->wait);

	if (terminating)
		goto out;

	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_conn->accept_node)) {
		list_del_init(&isert_conn->accept_node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->np_accept_mutex);

out:
	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
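
/*
 * Repost up to "count" receive buffers starting at rx_desc_head. The
 * head wraps with a mask, which relies on ISERT_QP_MAX_RECV_DTOS being
 * a power of two.
 */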
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		isert_dbg("Posted %d RX buffers\n", count);
		isert_conn->rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (uintptr_t)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}
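
/*
 * Send a login response PDU. Once the final login PDU completes, the
 * full-feature resources (fastreg pool for non-discovery sessions and
 * the RX descriptor ring) are allocated and the connection moves to
 * ISER_CONN_FULL_FEATURE before the response is posted.
 */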
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		  sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
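
/*
 * Demultiplex a received iSCSI PDU by opcode. Discovery sessions are
 * restricted to TEXT and LOGOUT; anything else is logged and ignored.
 */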
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			if (!cmd)
				break;
		} else {
			cmd = isert_allocate_cmd(conn);
			if (!cmd)
				break;
		}

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
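
/*
 * Receive completion path. The login buffer is special-cased (it is
 * mapped separately from the rx_descs ring); for datapath buffers the
 * PDU is handled and receives are replenished in ISERT_MIN_POSTED_RX
 * batches once enough posted buffers have been consumed.
 */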
static void
isert_rcv_completion(struct iser_rx_desc *desc,
		     struct isert_conn *isert_conn,
		     u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	isert_dbg("Decremented post_recv_buf_count: %d\n",
		  isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			isert_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}

static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->pool_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_bh(&isert_conn->pool_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}
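
/*
 * Final per-command teardown, split by iSCSI opcode: unhook from the
 * connection list, release RDMA resources, and hand the descriptor
 * back to the core (transport_generic_free_cmd() when a se_cmd was
 * initialized, iscsit_release_cmd() otherwise).
 */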
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
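
/*
 * Query the signature MR after a protected transfer and translate any
 * T10-PI failure into the matching TCM sense code. block_size + 8
 * accounts for the 8-byte PI tuple appended to each logical block when
 * converting the reported error offset into a bad sector number.
 */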
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
isert_snd_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
	case ISER_IB_SEND:
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
	void *start = isert_conn->rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}
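
/*
 * Error/flush completion handling: the beacon wr_id signals that the
 * drain beacon itself has flushed, TX descriptors are unmapped and
 * released, and the connection is reinstated once the last posted
 * receive has flushed.
 */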
static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}

static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_snd_completion(tx_desc, isert_conn);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("%s (%d): wr id %llx vend_err %x\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id, wc->vendor_err);
		else
			isert_dbg("%s (%d): wr id %llx\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}
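
/*
 * CQ bottom half: poll completions in ARRAY_SIZE(comp->wcs) chunks up
 * to a 65536-entry budget per invocation, then re-arm the CQ.
 */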
2125 isert_cq_work(struct work_struct
*work
)
2127 enum { isert_poll_budget
= 65536 };
2128 struct isert_comp
*comp
= container_of(work
, struct isert_comp
,
2130 struct ib_wc
*const wcs
= comp
->wcs
;
2131 int i
, n
, completed
= 0;
2133 while ((n
= ib_poll_cq(comp
->cq
, ARRAY_SIZE(comp
->wcs
), wcs
)) > 0) {
2134 for (i
= 0; i
< n
; i
++)
2135 isert_handle_wc(&wcs
[i
]);
2138 if (completed
>= isert_poll_budget
)
2142 ib_req_notify_cq(comp
->cq
, IB_CQ_NEXT_COMP
);
2146 isert_cq_callback(struct ib_cq
*cq
, void *context
)
2148 struct isert_comp
*comp
= context
;
2150 queue_work(isert_comp_wq
, &comp
->work
);
2154 isert_post_response(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
)
2156 struct ib_send_wr
*wr_failed
;
2159 ret
= ib_post_send(isert_conn
->qp
, &isert_cmd
->tx_desc
.send_wr
,
2162 isert_err("ib_post_send failed with %d\n", ret
);
2169 isert_put_response(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
)
2171 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
2172 struct isert_conn
*isert_conn
= conn
->context
;
2173 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
2174 struct iscsi_scsi_rsp
*hdr
= (struct iscsi_scsi_rsp
*)
2175 &isert_cmd
->tx_desc
.iscsi_header
;
2177 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
2178 iscsit_build_rsp_pdu(cmd
, conn
, true, hdr
);
2179 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
2181 * Attach SENSE DATA payload to iSCSI Response PDU
2183 if (cmd
->se_cmd
.sense_buffer
&&
2184 ((cmd
->se_cmd
.se_cmd_flags
& SCF_TRANSPORT_TASK_SENSE
) ||
2185 (cmd
->se_cmd
.se_cmd_flags
& SCF_EMULATED_TASK_SENSE
))) {
2186 struct isert_device
*device
= isert_conn
->device
;
2187 struct ib_device
*ib_dev
= device
->ib_device
;
2188 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
2189 u32 padding
, pdu_len
;
2191 put_unaligned_be16(cmd
->se_cmd
.scsi_sense_length
,
2193 cmd
->se_cmd
.scsi_sense_length
+= sizeof(__be16
);
2195 padding
= -(cmd
->se_cmd
.scsi_sense_length
) & 3;
2196 hton24(hdr
->dlength
, (u32
)cmd
->se_cmd
.scsi_sense_length
);
2197 pdu_len
= cmd
->se_cmd
.scsi_sense_length
+ padding
;
2199 isert_cmd
->pdu_buf_dma
= ib_dma_map_single(ib_dev
,
2200 (void *)cmd
->sense_buffer
, pdu_len
,
2203 isert_cmd
->pdu_buf_len
= pdu_len
;
2204 tx_dsg
->addr
= isert_cmd
->pdu_buf_dma
;
2205 tx_dsg
->length
= pdu_len
;
2206 tx_dsg
->lkey
= device
->pd
->local_dma_lkey
;
2207 isert_cmd
->tx_desc
.num_sge
= 2;
2210 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
);
2212 isert_dbg("Posting SCSI Response\n");
2214 return isert_post_response(isert_conn
, isert_cmd
);
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

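/*
 * Translate a slice of the TCM scatterlist into the ib_sge array of a
 * single RDMA work request, starting at @offset and covering at most
 * max_sge entries or @data_left bytes, whichever runs out first.
 * Returns the number of SGEs consumed so the caller can advance its
 * cursor into the ib_sge array.
 */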
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->pd->local_dma_lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	send_wr->num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  send_wr->sg_list, send_wr->num_sge);

	return send_wr->num_sge;
}

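/*
 * Non-fastreg RDMA setup path: map the command's data buffer and chain
 * one RDMA_WRITE/RDMA_READ work request per max_sge * PAGE_SIZE chunk.
 * For writes the final WR links to the response send_wr; for reads the
 * final WR is marked IB_SEND_SIGNALED so its completion drives the
 * data-out state machine.
 */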
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}

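/*
 * Build the fast-registration page list from the DMA-mapped
 * scatterlist. Contiguous entries are coalesced into chunks; a chunk
 * may only end at a non page-aligned boundary if it is the last entry.
 * Returns the number of pages written to @fr_pl.
 */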
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
			  i, (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
				  n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}

static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

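/*
 * Register @mem with a fast-registration MR. Single-SGE buffers are
 * passed through with the local DMA lkey and skip registration
 * entirely. If the key is still marked valid from a previous use, a
 * LOCAL_INV WR is chained in front of the FASTREG WR.
 */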
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;

	if (mem->dma_nents == 1) {
		sge->lkey = device->pd->local_dma_lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}

static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

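/*
 * Build the signature check_mask. The constants appear to follow the
 * IB signature verbs check-mask layout (guard bits 0xc0, app-tag bits
 * 0x30, ref-tag bits 0x0f); note that both tag groups are keyed off
 * TARGET_DIF_CHECK_REFTAG here.
 */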
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}

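/*
 * Register a signature MR over the DATA (and, when present, PROT)
 * SGEs so the HCA performs T10-PI generate/verify during the RDMA
 * transfer. The resulting SIG SGE spans data_length, plus prot_length
 * whenever protection guards actually travel on the wire.
 */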
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}

static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}

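/*
 * Fast-registration RDMA setup path: map the data buffer, take a
 * descriptor from the connection's fr_pool when registration is
 * needed (multi-SGE buffer or PI command), optionally set up the
 * signature MR, and build a single RDMA work request over the
 * resulting SGE.
 */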
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}

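/*
 * Queue Data-IN: post the RDMA_WRITE chain toward the initiator. For
 * non-PI commands the SCSI response PDU is linked directly behind the
 * RDMA_WRITE so both are posted in one call; PI commands defer the
 * response to the RDMA completion path.
 */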
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}

static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret = 0;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

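/*
 * Create, bind, and listen on an RDMA CM ID for the network portal.
 * Returns the listening ID on success or an ERR_PTR on failure.
 */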
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a nop.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}

static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, accept_node);
	list_del_init(&isert_conn->accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up whatever piled
	 * up in np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		isert_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 accept_node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->wait);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}

static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}

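/*
 * Connection teardown ordering: terminate the CM connection first,
 * then wait for outstanding se_cmd references, for flush errors (via
 * the beacon posted in isert_wait4flush), and for a posted logout
 * response, before scheduling the final release work.
 */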
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4cmds(conn);
	isert_wait4flush(isert_conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_wait4flush(isert_conn);
	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);