1 /*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
4 * (c) Copyright 2013 RisingTide Systems LLC.
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
31 #include "isert_proto.h"
34 #define ISERT_MAX_CONN 8
35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
36 #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
38 static DEFINE_MUTEX(device_list_mutex
);
39 static LIST_HEAD(device_list
);
40 static struct workqueue_struct
*isert_rx_wq
;
41 static struct workqueue_struct
*isert_comp_wq
;
42 static struct kmem_cache
*isert_cmd_cache
;
45 isert_qp_event_callback(struct ib_event
*e
, void *context
)
47 struct isert_conn
*isert_conn
= (struct isert_conn
*)context
;
49 pr_err("isert_qp_event_callback event: %d\n", e
->event
);
51 case IB_EVENT_COMM_EST
:
52 rdma_notify(isert_conn
->conn_cm_id
, IB_EVENT_COMM_EST
);
54 case IB_EVENT_QP_LAST_WQE_REACHED
:
55 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
63 isert_query_device(struct ib_device
*ib_dev
, struct ib_device_attr
*devattr
)
67 ret
= ib_query_device(ib_dev
, devattr
);
69 pr_err("ib_query_device() failed: %d\n", ret
);
72 pr_debug("devattr->max_sge: %d\n", devattr
->max_sge
);
73 pr_debug("devattr->max_sge_rd: %d\n", devattr
->max_sge_rd
);
79 isert_conn_setup_qp(struct isert_conn
*isert_conn
, struct rdma_cm_id
*cma_id
)
81 struct isert_device
*device
= isert_conn
->conn_device
;
82 struct ib_qp_init_attr attr
;
83 struct ib_device_attr devattr
;
84 int ret
, index
, min_index
= 0;
86 memset(&devattr
, 0, sizeof(struct ib_device_attr
));
87 ret
= isert_query_device(cma_id
->device
, &devattr
);
91 mutex_lock(&device_list_mutex
);
92 for (index
= 0; index
< device
->cqs_used
; index
++)
93 if (device
->cq_active_qps
[index
] <
94 device
->cq_active_qps
[min_index
])
96 device
->cq_active_qps
[min_index
]++;
97 pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index
);
98 mutex_unlock(&device_list_mutex
);
100 memset(&attr
, 0, sizeof(struct ib_qp_init_attr
));
101 attr
.event_handler
= isert_qp_event_callback
;
102 attr
.qp_context
= isert_conn
;
103 attr
.send_cq
= device
->dev_tx_cq
[min_index
];
104 attr
.recv_cq
= device
->dev_rx_cq
[min_index
];
105 attr
.cap
.max_send_wr
= ISERT_QP_MAX_REQ_DTOS
;
106 attr
.cap
.max_recv_wr
= ISERT_QP_MAX_RECV_DTOS
;
108 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
109 * work-around for RDMA_READ..
111 attr
.cap
.max_send_sge
= devattr
.max_sge
- 2;
112 isert_conn
->max_sge
= attr
.cap
.max_send_sge
;
114 attr
.cap
.max_recv_sge
= 1;
115 attr
.sq_sig_type
= IB_SIGNAL_REQ_WR
;
116 attr
.qp_type
= IB_QPT_RC
;
118 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
120 pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
121 isert_conn
->conn_pd
->device
);
123 ret
= rdma_create_qp(cma_id
, isert_conn
->conn_pd
, &attr
);
125 pr_err("rdma_create_qp failed for cma_id %d\n", ret
);
128 isert_conn
->conn_qp
= cma_id
->qp
;
129 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
135 isert_cq_event_callback(struct ib_event
*e
, void *context
)
137 pr_debug("isert_cq_event_callback event: %d\n", e
->event
);
141 isert_alloc_rx_descriptors(struct isert_conn
*isert_conn
)
143 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
144 struct iser_rx_desc
*rx_desc
;
145 struct ib_sge
*rx_sg
;
149 isert_conn
->conn_rx_descs
= kzalloc(ISERT_QP_MAX_RECV_DTOS
*
150 sizeof(struct iser_rx_desc
), GFP_KERNEL
);
151 if (!isert_conn
->conn_rx_descs
)
154 rx_desc
= isert_conn
->conn_rx_descs
;
156 for (i
= 0; i
< ISERT_QP_MAX_RECV_DTOS
; i
++, rx_desc
++) {
157 dma_addr
= ib_dma_map_single(ib_dev
, (void *)rx_desc
,
158 ISER_RX_PAYLOAD_SIZE
, DMA_FROM_DEVICE
);
159 if (ib_dma_mapping_error(ib_dev
, dma_addr
))
162 rx_desc
->dma_addr
= dma_addr
;
164 rx_sg
= &rx_desc
->rx_sg
;
165 rx_sg
->addr
= rx_desc
->dma_addr
;
166 rx_sg
->length
= ISER_RX_PAYLOAD_SIZE
;
167 rx_sg
->lkey
= isert_conn
->conn_mr
->lkey
;
170 isert_conn
->conn_rx_desc_head
= 0;
174 rx_desc
= isert_conn
->conn_rx_descs
;
175 for (j
= 0; j
< i
; j
++, rx_desc
++) {
176 ib_dma_unmap_single(ib_dev
, rx_desc
->dma_addr
,
177 ISER_RX_PAYLOAD_SIZE
, DMA_FROM_DEVICE
);
179 kfree(isert_conn
->conn_rx_descs
);
180 isert_conn
->conn_rx_descs
= NULL
;
186 isert_free_rx_descriptors(struct isert_conn
*isert_conn
)
188 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
189 struct iser_rx_desc
*rx_desc
;
192 if (!isert_conn
->conn_rx_descs
)
195 rx_desc
= isert_conn
->conn_rx_descs
;
196 for (i
= 0; i
< ISERT_QP_MAX_RECV_DTOS
; i
++, rx_desc
++) {
197 ib_dma_unmap_single(ib_dev
, rx_desc
->dma_addr
,
198 ISER_RX_PAYLOAD_SIZE
, DMA_FROM_DEVICE
);
201 kfree(isert_conn
->conn_rx_descs
);
202 isert_conn
->conn_rx_descs
= NULL
;
205 static void isert_cq_tx_callback(struct ib_cq
*, void *);
206 static void isert_cq_rx_callback(struct ib_cq
*, void *);
209 isert_create_device_ib_res(struct isert_device
*device
)
211 struct ib_device
*ib_dev
= device
->ib_device
;
212 struct isert_cq_desc
*cq_desc
;
215 device
->cqs_used
= min_t(int, num_online_cpus(),
216 device
->ib_device
->num_comp_vectors
);
217 device
->cqs_used
= min(ISERT_MAX_CQ
, device
->cqs_used
);
218 pr_debug("Using %d CQs, device %s supports %d vectors\n",
219 device
->cqs_used
, device
->ib_device
->name
,
220 device
->ib_device
->num_comp_vectors
);
221 device
->cq_desc
= kzalloc(sizeof(struct isert_cq_desc
) *
222 device
->cqs_used
, GFP_KERNEL
);
223 if (!device
->cq_desc
) {
224 pr_err("Unable to allocate device->cq_desc\n");
227 cq_desc
= device
->cq_desc
;
229 device
->dev_pd
= ib_alloc_pd(ib_dev
);
230 if (IS_ERR(device
->dev_pd
)) {
231 ret
= PTR_ERR(device
->dev_pd
);
232 pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret
);
236 for (i
= 0; i
< device
->cqs_used
; i
++) {
237 cq_desc
[i
].device
= device
;
238 cq_desc
[i
].cq_index
= i
;
240 device
->dev_rx_cq
[i
] = ib_create_cq(device
->ib_device
,
241 isert_cq_rx_callback
,
242 isert_cq_event_callback
,
244 ISER_MAX_RX_CQ_LEN
, i
);
245 if (IS_ERR(device
->dev_rx_cq
[i
]))
248 device
->dev_tx_cq
[i
] = ib_create_cq(device
->ib_device
,
249 isert_cq_tx_callback
,
250 isert_cq_event_callback
,
252 ISER_MAX_TX_CQ_LEN
, i
);
253 if (IS_ERR(device
->dev_tx_cq
[i
]))
256 if (ib_req_notify_cq(device
->dev_rx_cq
[i
], IB_CQ_NEXT_COMP
))
259 if (ib_req_notify_cq(device
->dev_tx_cq
[i
], IB_CQ_NEXT_COMP
))
263 device
->dev_mr
= ib_get_dma_mr(device
->dev_pd
, IB_ACCESS_LOCAL_WRITE
);
264 if (IS_ERR(device
->dev_mr
)) {
265 ret
= PTR_ERR(device
->dev_mr
);
266 pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret
);
273 for (j
= 0; j
< i
; j
++) {
274 cq_desc
= &device
->cq_desc
[j
];
276 if (device
->dev_rx_cq
[j
]) {
277 cancel_work_sync(&cq_desc
->cq_rx_work
);
278 ib_destroy_cq(device
->dev_rx_cq
[j
]);
280 if (device
->dev_tx_cq
[j
]) {
281 cancel_work_sync(&cq_desc
->cq_tx_work
);
282 ib_destroy_cq(device
->dev_tx_cq
[j
]);
285 ib_dealloc_pd(device
->dev_pd
);
288 kfree(device
->cq_desc
);
294 isert_free_device_ib_res(struct isert_device
*device
)
296 struct isert_cq_desc
*cq_desc
;
299 for (i
= 0; i
< device
->cqs_used
; i
++) {
300 cq_desc
= &device
->cq_desc
[i
];
302 cancel_work_sync(&cq_desc
->cq_rx_work
);
303 cancel_work_sync(&cq_desc
->cq_tx_work
);
304 ib_destroy_cq(device
->dev_rx_cq
[i
]);
305 ib_destroy_cq(device
->dev_tx_cq
[i
]);
306 device
->dev_rx_cq
[i
] = NULL
;
307 device
->dev_tx_cq
[i
] = NULL
;
310 ib_dereg_mr(device
->dev_mr
);
311 ib_dealloc_pd(device
->dev_pd
);
312 kfree(device
->cq_desc
);
316 isert_device_try_release(struct isert_device
*device
)
318 mutex_lock(&device_list_mutex
);
320 if (!device
->refcount
) {
321 isert_free_device_ib_res(device
);
322 list_del(&device
->dev_node
);
325 mutex_unlock(&device_list_mutex
);
328 static struct isert_device
*
329 isert_device_find_by_ib_dev(struct rdma_cm_id
*cma_id
)
331 struct isert_device
*device
;
334 mutex_lock(&device_list_mutex
);
335 list_for_each_entry(device
, &device_list
, dev_node
) {
336 if (device
->ib_device
->node_guid
== cma_id
->device
->node_guid
) {
338 mutex_unlock(&device_list_mutex
);
343 device
= kzalloc(sizeof(struct isert_device
), GFP_KERNEL
);
345 mutex_unlock(&device_list_mutex
);
346 return ERR_PTR(-ENOMEM
);
349 INIT_LIST_HEAD(&device
->dev_node
);
351 device
->ib_device
= cma_id
->device
;
352 ret
= isert_create_device_ib_res(device
);
355 mutex_unlock(&device_list_mutex
);
360 list_add_tail(&device
->dev_node
, &device_list
);
361 mutex_unlock(&device_list_mutex
);
367 isert_connect_request(struct rdma_cm_id
*cma_id
, struct rdma_cm_event
*event
)
369 struct iscsi_np
*np
= cma_id
->context
;
370 struct isert_np
*isert_np
= np
->np_context
;
371 struct isert_conn
*isert_conn
;
372 struct isert_device
*device
;
373 struct ib_device
*ib_dev
= cma_id
->device
;
376 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
377 cma_id
, cma_id
->context
);
379 isert_conn
= kzalloc(sizeof(struct isert_conn
), GFP_KERNEL
);
381 pr_err("Unable to allocate isert_conn\n");
384 isert_conn
->state
= ISER_CONN_INIT
;
385 INIT_LIST_HEAD(&isert_conn
->conn_accept_node
);
386 init_completion(&isert_conn
->conn_login_comp
);
387 init_waitqueue_head(&isert_conn
->conn_wait
);
388 init_waitqueue_head(&isert_conn
->conn_wait_comp_err
);
389 kref_init(&isert_conn
->conn_kref
);
390 kref_get(&isert_conn
->conn_kref
);
392 cma_id
->context
= isert_conn
;
393 isert_conn
->conn_cm_id
= cma_id
;
394 isert_conn
->responder_resources
= event
->param
.conn
.responder_resources
;
395 isert_conn
->initiator_depth
= event
->param
.conn
.initiator_depth
;
396 pr_debug("Using responder_resources: %u initiator_depth: %u\n",
397 isert_conn
->responder_resources
, isert_conn
->initiator_depth
);
399 isert_conn
->login_buf
= kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN
+
400 ISER_RX_LOGIN_SIZE
, GFP_KERNEL
);
401 if (!isert_conn
->login_buf
) {
402 pr_err("Unable to allocate isert_conn->login_buf\n");
407 isert_conn
->login_req_buf
= isert_conn
->login_buf
;
408 isert_conn
->login_rsp_buf
= isert_conn
->login_buf
+
409 ISCSI_DEF_MAX_RECV_SEG_LEN
;
410 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
411 isert_conn
->login_buf
, isert_conn
->login_req_buf
,
412 isert_conn
->login_rsp_buf
);
414 isert_conn
->login_req_dma
= ib_dma_map_single(ib_dev
,
415 (void *)isert_conn
->login_req_buf
,
416 ISCSI_DEF_MAX_RECV_SEG_LEN
, DMA_FROM_DEVICE
);
418 ret
= ib_dma_mapping_error(ib_dev
, isert_conn
->login_req_dma
);
420 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
422 isert_conn
->login_req_dma
= 0;
426 isert_conn
->login_rsp_dma
= ib_dma_map_single(ib_dev
,
427 (void *)isert_conn
->login_rsp_buf
,
428 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
430 ret
= ib_dma_mapping_error(ib_dev
, isert_conn
->login_rsp_dma
);
432 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
434 isert_conn
->login_rsp_dma
= 0;
435 goto out_req_dma_map
;
438 device
= isert_device_find_by_ib_dev(cma_id
);
439 if (IS_ERR(device
)) {
440 ret
= PTR_ERR(device
);
441 goto out_rsp_dma_map
;
444 isert_conn
->conn_device
= device
;
445 isert_conn
->conn_pd
= device
->dev_pd
;
446 isert_conn
->conn_mr
= device
->dev_mr
;
448 ret
= isert_conn_setup_qp(isert_conn
, cma_id
);
452 mutex_lock(&isert_np
->np_accept_mutex
);
453 list_add_tail(&isert_np
->np_accept_list
, &isert_conn
->conn_accept_node
);
454 mutex_unlock(&isert_np
->np_accept_mutex
);
456 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np
);
457 wake_up(&isert_np
->np_accept_wq
);
461 isert_device_try_release(device
);
463 ib_dma_unmap_single(ib_dev
, isert_conn
->login_rsp_dma
,
464 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
466 ib_dma_unmap_single(ib_dev
, isert_conn
->login_req_dma
,
467 ISCSI_DEF_MAX_RECV_SEG_LEN
, DMA_FROM_DEVICE
);
469 kfree(isert_conn
->login_buf
);
476 isert_connect_release(struct isert_conn
*isert_conn
)
478 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
479 struct isert_device
*device
= isert_conn
->conn_device
;
482 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
484 if (isert_conn
->conn_qp
) {
485 cq_index
= ((struct isert_cq_desc
*)
486 isert_conn
->conn_qp
->recv_cq
->cq_context
)->cq_index
;
487 pr_debug("isert_connect_release: cq_index: %d\n", cq_index
);
488 isert_conn
->conn_device
->cq_active_qps
[cq_index
]--;
490 rdma_destroy_qp(isert_conn
->conn_cm_id
);
493 isert_free_rx_descriptors(isert_conn
);
494 rdma_destroy_id(isert_conn
->conn_cm_id
);
496 if (isert_conn
->login_buf
) {
497 ib_dma_unmap_single(ib_dev
, isert_conn
->login_rsp_dma
,
498 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
499 ib_dma_unmap_single(ib_dev
, isert_conn
->login_req_dma
,
500 ISCSI_DEF_MAX_RECV_SEG_LEN
,
502 kfree(isert_conn
->login_buf
);
507 isert_device_try_release(device
);
509 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
513 isert_connected_handler(struct rdma_cm_id
*cma_id
)
519 isert_release_conn_kref(struct kref
*kref
)
521 struct isert_conn
*isert_conn
= container_of(kref
,
522 struct isert_conn
, conn_kref
);
524 pr_debug("Calling isert_connect_release for final kref %s/%d\n",
525 current
->comm
, current
->pid
);
527 isert_connect_release(isert_conn
);
531 isert_put_conn(struct isert_conn
*isert_conn
)
533 kref_put(&isert_conn
->conn_kref
, isert_release_conn_kref
);
537 isert_disconnect_work(struct work_struct
*work
)
539 struct isert_conn
*isert_conn
= container_of(work
,
540 struct isert_conn
, conn_logout_work
);
542 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
544 isert_conn
->state
= ISER_CONN_DOWN
;
546 if (isert_conn
->post_recv_buf_count
== 0 &&
547 atomic_read(&isert_conn
->post_send_buf_count
) == 0) {
548 pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
549 wake_up(&isert_conn
->conn_wait
);
552 isert_put_conn(isert_conn
);
556 isert_disconnected_handler(struct rdma_cm_id
*cma_id
)
558 struct isert_conn
*isert_conn
= (struct isert_conn
*)cma_id
->context
;
560 INIT_WORK(&isert_conn
->conn_logout_work
, isert_disconnect_work
);
561 schedule_work(&isert_conn
->conn_logout_work
);
565 isert_cma_handler(struct rdma_cm_id
*cma_id
, struct rdma_cm_event
*event
)
569 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
570 event
->event
, event
->status
, cma_id
->context
, cma_id
);
572 switch (event
->event
) {
573 case RDMA_CM_EVENT_CONNECT_REQUEST
:
574 pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
575 ret
= isert_connect_request(cma_id
, event
);
577 case RDMA_CM_EVENT_ESTABLISHED
:
578 pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
579 isert_connected_handler(cma_id
);
581 case RDMA_CM_EVENT_DISCONNECTED
:
582 pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
583 isert_disconnected_handler(cma_id
);
585 case RDMA_CM_EVENT_DEVICE_REMOVAL
:
586 case RDMA_CM_EVENT_ADDR_CHANGE
:
588 case RDMA_CM_EVENT_CONNECT_ERROR
:
590 pr_err("Unknown RDMA CMA event: %d\n", event
->event
);
595 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
604 isert_post_recv(struct isert_conn
*isert_conn
, u32 count
)
606 struct ib_recv_wr
*rx_wr
, *rx_wr_failed
;
608 unsigned int rx_head
= isert_conn
->conn_rx_desc_head
;
609 struct iser_rx_desc
*rx_desc
;
611 for (rx_wr
= isert_conn
->conn_rx_wr
, i
= 0; i
< count
; i
++, rx_wr
++) {
612 rx_desc
= &isert_conn
->conn_rx_descs
[rx_head
];
613 rx_wr
->wr_id
= (unsigned long)rx_desc
;
614 rx_wr
->sg_list
= &rx_desc
->rx_sg
;
616 rx_wr
->next
= rx_wr
+ 1;
617 rx_head
= (rx_head
+ 1) & (ISERT_QP_MAX_RECV_DTOS
- 1);
621 rx_wr
->next
= NULL
; /* mark end of work requests list */
623 isert_conn
->post_recv_buf_count
+= count
;
624 ret
= ib_post_recv(isert_conn
->conn_qp
, isert_conn
->conn_rx_wr
,
627 pr_err("ib_post_recv() failed with ret: %d\n", ret
);
628 isert_conn
->post_recv_buf_count
-= count
;
630 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count
);
631 isert_conn
->conn_rx_desc_head
= rx_head
;
637 isert_post_send(struct isert_conn
*isert_conn
, struct iser_tx_desc
*tx_desc
)
639 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
640 struct ib_send_wr send_wr
, *send_wr_failed
;
643 ib_dma_sync_single_for_device(ib_dev
, tx_desc
->dma_addr
,
644 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
647 send_wr
.wr_id
= (unsigned long)tx_desc
;
648 send_wr
.sg_list
= tx_desc
->tx_sg
;
649 send_wr
.num_sge
= tx_desc
->num_sge
;
650 send_wr
.opcode
= IB_WR_SEND
;
651 send_wr
.send_flags
= IB_SEND_SIGNALED
;
653 atomic_inc(&isert_conn
->post_send_buf_count
);
655 ret
= ib_post_send(isert_conn
->conn_qp
, &send_wr
, &send_wr_failed
);
657 pr_err("ib_post_send() failed, ret: %d\n", ret
);
658 atomic_dec(&isert_conn
->post_send_buf_count
);
665 isert_create_send_desc(struct isert_conn
*isert_conn
,
666 struct isert_cmd
*isert_cmd
,
667 struct iser_tx_desc
*tx_desc
)
669 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
671 ib_dma_sync_single_for_cpu(ib_dev
, tx_desc
->dma_addr
,
672 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
674 memset(&tx_desc
->iser_header
, 0, sizeof(struct iser_hdr
));
675 tx_desc
->iser_header
.flags
= ISER_VER
;
677 tx_desc
->num_sge
= 1;
678 tx_desc
->isert_cmd
= isert_cmd
;
680 if (tx_desc
->tx_sg
[0].lkey
!= isert_conn
->conn_mr
->lkey
) {
681 tx_desc
->tx_sg
[0].lkey
= isert_conn
->conn_mr
->lkey
;
682 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc
);
687 isert_init_tx_hdrs(struct isert_conn
*isert_conn
,
688 struct iser_tx_desc
*tx_desc
)
690 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
693 dma_addr
= ib_dma_map_single(ib_dev
, (void *)tx_desc
,
694 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
695 if (ib_dma_mapping_error(ib_dev
, dma_addr
)) {
696 pr_err("ib_dma_mapping_error() failed\n");
700 tx_desc
->dma_addr
= dma_addr
;
701 tx_desc
->tx_sg
[0].addr
= tx_desc
->dma_addr
;
702 tx_desc
->tx_sg
[0].length
= ISER_HEADERS_LEN
;
703 tx_desc
->tx_sg
[0].lkey
= isert_conn
->conn_mr
->lkey
;
705 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
706 " lkey: 0x%08x\n", tx_desc
->tx_sg
[0].addr
,
707 tx_desc
->tx_sg
[0].length
, tx_desc
->tx_sg
[0].lkey
);
713 isert_init_send_wr(struct isert_cmd
*isert_cmd
, struct ib_send_wr
*send_wr
)
715 isert_cmd
->rdma_wr
.iser_ib_op
= ISER_IB_SEND
;
716 send_wr
->wr_id
= (unsigned long)&isert_cmd
->tx_desc
;
717 send_wr
->opcode
= IB_WR_SEND
;
718 send_wr
->send_flags
= IB_SEND_SIGNALED
;
719 send_wr
->sg_list
= &isert_cmd
->tx_desc
.tx_sg
[0];
720 send_wr
->num_sge
= isert_cmd
->tx_desc
.num_sge
;
724 isert_rdma_post_recvl(struct isert_conn
*isert_conn
)
726 struct ib_recv_wr rx_wr
, *rx_wr_fail
;
730 memset(&sge
, 0, sizeof(struct ib_sge
));
731 sge
.addr
= isert_conn
->login_req_dma
;
732 sge
.length
= ISER_RX_LOGIN_SIZE
;
733 sge
.lkey
= isert_conn
->conn_mr
->lkey
;
735 pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
736 sge
.addr
, sge
.length
, sge
.lkey
);
738 memset(&rx_wr
, 0, sizeof(struct ib_recv_wr
));
739 rx_wr
.wr_id
= (unsigned long)isert_conn
->login_req_buf
;
740 rx_wr
.sg_list
= &sge
;
743 isert_conn
->post_recv_buf_count
++;
744 ret
= ib_post_recv(isert_conn
->conn_qp
, &rx_wr
, &rx_wr_fail
);
746 pr_err("ib_post_recv() failed: %d\n", ret
);
747 isert_conn
->post_recv_buf_count
--;
750 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
755 isert_put_login_tx(struct iscsi_conn
*conn
, struct iscsi_login
*login
,
758 struct isert_conn
*isert_conn
= conn
->context
;
759 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
760 struct iser_tx_desc
*tx_desc
= &isert_conn
->conn_login_tx_desc
;
763 isert_create_send_desc(isert_conn
, NULL
, tx_desc
);
765 memcpy(&tx_desc
->iscsi_header
, &login
->rsp
[0],
766 sizeof(struct iscsi_hdr
));
768 isert_init_tx_hdrs(isert_conn
, tx_desc
);
771 struct ib_sge
*tx_dsg
= &tx_desc
->tx_sg
[1];
773 ib_dma_sync_single_for_cpu(ib_dev
, isert_conn
->login_rsp_dma
,
774 length
, DMA_TO_DEVICE
);
776 memcpy(isert_conn
->login_rsp_buf
, login
->rsp_buf
, length
);
778 ib_dma_sync_single_for_device(ib_dev
, isert_conn
->login_rsp_dma
,
779 length
, DMA_TO_DEVICE
);
781 tx_dsg
->addr
= isert_conn
->login_rsp_dma
;
782 tx_dsg
->length
= length
;
783 tx_dsg
->lkey
= isert_conn
->conn_mr
->lkey
;
784 tx_desc
->num_sge
= 2;
786 if (!login
->login_failed
) {
787 if (login
->login_complete
) {
788 ret
= isert_alloc_rx_descriptors(isert_conn
);
792 ret
= isert_post_recv(isert_conn
, ISERT_MIN_POSTED_RX
);
796 isert_conn
->state
= ISER_CONN_UP
;
800 ret
= isert_rdma_post_recvl(isert_conn
);
805 ret
= isert_post_send(isert_conn
, tx_desc
);
813 isert_rx_login_req(struct iser_rx_desc
*rx_desc
, int rx_buflen
,
814 struct isert_conn
*isert_conn
)
816 struct iscsi_conn
*conn
= isert_conn
->conn
;
817 struct iscsi_login
*login
= conn
->conn_login
;
821 pr_err("conn->conn_login is NULL\n");
826 if (login
->first_request
) {
827 struct iscsi_login_req
*login_req
=
828 (struct iscsi_login_req
*)&rx_desc
->iscsi_header
;
830 * Setup the initial iscsi_login values from the leading
833 login
->leading_connection
= (!login_req
->tsih
) ? 1 : 0;
834 login
->current_stage
=
835 (login_req
->flags
& ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK
)
837 login
->version_min
= login_req
->min_version
;
838 login
->version_max
= login_req
->max_version
;
839 memcpy(login
->isid
, login_req
->isid
, 6);
840 login
->cmd_sn
= be32_to_cpu(login_req
->cmdsn
);
841 login
->init_task_tag
= login_req
->itt
;
842 login
->initial_exp_statsn
= be32_to_cpu(login_req
->exp_statsn
);
843 login
->cid
= be16_to_cpu(login_req
->cid
);
844 login
->tsih
= be16_to_cpu(login_req
->tsih
);
847 memcpy(&login
->req
[0], (void *)&rx_desc
->iscsi_header
, ISCSI_HDR_LEN
);
849 size
= min(rx_buflen
, MAX_KEY_VALUE_PAIRS
);
850 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
851 size
, rx_buflen
, MAX_KEY_VALUE_PAIRS
);
852 memcpy(login
->req_buf
, &rx_desc
->data
[0], size
);
854 complete(&isert_conn
->conn_login_comp
);
858 isert_release_cmd(struct iscsi_cmd
*cmd
)
860 struct isert_cmd
*isert_cmd
= container_of(cmd
, struct isert_cmd
,
863 pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd
);
868 kmem_cache_free(isert_cmd_cache
, isert_cmd
);
871 static struct iscsi_cmd
872 *isert_alloc_cmd(struct iscsi_conn
*conn
, gfp_t gfp
)
874 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
875 struct isert_cmd
*isert_cmd
;
877 isert_cmd
= kmem_cache_zalloc(isert_cmd_cache
, gfp
);
879 pr_err("Unable to allocate isert_cmd\n");
882 isert_cmd
->conn
= isert_conn
;
883 isert_cmd
->iscsi_cmd
.release_cmd
= &isert_release_cmd
;
885 return &isert_cmd
->iscsi_cmd
;
889 isert_handle_scsi_cmd(struct isert_conn
*isert_conn
,
890 struct isert_cmd
*isert_cmd
, struct iser_rx_desc
*rx_desc
,
893 struct iscsi_cmd
*cmd
= &isert_cmd
->iscsi_cmd
;
894 struct iscsi_conn
*conn
= isert_conn
->conn
;
895 struct iscsi_scsi_req
*hdr
= (struct iscsi_scsi_req
*)buf
;
896 struct scatterlist
*sg
;
897 int imm_data
, imm_data_len
, unsol_data
, sg_nents
, rc
;
898 bool dump_payload
= false;
900 rc
= iscsit_setup_scsi_cmd(conn
, cmd
, buf
);
904 imm_data
= cmd
->immediate_data
;
905 imm_data_len
= cmd
->first_burst_len
;
906 unsol_data
= cmd
->unsolicited_data
;
908 rc
= iscsit_process_scsi_cmd(conn
, cmd
, hdr
);
919 sg
= &cmd
->se_cmd
.t_data_sg
[0];
920 sg_nents
= max(1UL, DIV_ROUND_UP(imm_data_len
, PAGE_SIZE
));
922 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
923 sg
, sg_nents
, &rx_desc
->data
[0], imm_data_len
);
925 sg_copy_from_buffer(sg
, sg_nents
, &rx_desc
->data
[0], imm_data_len
);
927 cmd
->write_data_done
+= imm_data_len
;
929 if (cmd
->write_data_done
== cmd
->se_cmd
.data_length
) {
930 spin_lock_bh(&cmd
->istate_lock
);
931 cmd
->cmd_flags
|= ICF_GOT_LAST_DATAOUT
;
932 cmd
->i_state
= ISTATE_RECEIVED_LAST_DATAOUT
;
933 spin_unlock_bh(&cmd
->istate_lock
);
937 rc
= iscsit_sequence_cmd(conn
, cmd
, hdr
->cmdsn
);
939 if (!rc
&& dump_payload
== false && unsol_data
)
940 iscsit_set_unsoliticed_dataout(cmd
);
942 if (rc
== CMDSN_ERROR_CANNOT_RECOVER
)
943 return iscsit_add_reject_from_cmd(
944 ISCSI_REASON_PROTOCOL_ERROR
,
945 1, 0, (unsigned char *)hdr
, cmd
);
951 isert_handle_iscsi_dataout(struct isert_conn
*isert_conn
,
952 struct iser_rx_desc
*rx_desc
, unsigned char *buf
)
954 struct scatterlist
*sg_start
;
955 struct iscsi_conn
*conn
= isert_conn
->conn
;
956 struct iscsi_cmd
*cmd
= NULL
;
957 struct iscsi_data
*hdr
= (struct iscsi_data
*)buf
;
958 u32 unsol_data_len
= ntoh24(hdr
->dlength
);
959 int rc
, sg_nents
, sg_off
, page_off
;
961 rc
= iscsit_check_dataout_hdr(conn
, buf
, &cmd
);
967 * FIXME: Unexpected unsolicited_data out
969 if (!cmd
->unsolicited_data
) {
970 pr_err("Received unexpected solicited data payload\n");
975 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
976 unsol_data_len
, cmd
->write_data_done
, cmd
->se_cmd
.data_length
);
978 sg_off
= cmd
->write_data_done
/ PAGE_SIZE
;
979 sg_start
= &cmd
->se_cmd
.t_data_sg
[sg_off
];
980 sg_nents
= max(1UL, DIV_ROUND_UP(unsol_data_len
, PAGE_SIZE
));
981 page_off
= cmd
->write_data_done
% PAGE_SIZE
;
983 * FIXME: Non page-aligned unsolicited_data out
986 pr_err("Received unexpected non-page aligned data payload\n");
990 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
991 sg_start
, sg_off
, sg_nents
, &rx_desc
->data
[0], unsol_data_len
);
993 sg_copy_from_buffer(sg_start
, sg_nents
, &rx_desc
->data
[0],
996 rc
= iscsit_check_dataout_payload(cmd
, hdr
, false);
1004 isert_rx_opcode(struct isert_conn
*isert_conn
, struct iser_rx_desc
*rx_desc
,
1005 uint32_t read_stag
, uint64_t read_va
,
1006 uint32_t write_stag
, uint64_t write_va
)
1008 struct iscsi_hdr
*hdr
= &rx_desc
->iscsi_header
;
1009 struct iscsi_conn
*conn
= isert_conn
->conn
;
1010 struct iscsi_cmd
*cmd
;
1011 struct isert_cmd
*isert_cmd
;
1013 u8 opcode
= (hdr
->opcode
& ISCSI_OPCODE_MASK
);
1016 case ISCSI_OP_SCSI_CMD
:
1017 cmd
= iscsit_allocate_cmd(conn
, GFP_KERNEL
);
1021 isert_cmd
= container_of(cmd
, struct isert_cmd
, iscsi_cmd
);
1022 isert_cmd
->read_stag
= read_stag
;
1023 isert_cmd
->read_va
= read_va
;
1024 isert_cmd
->write_stag
= write_stag
;
1025 isert_cmd
->write_va
= write_va
;
1027 ret
= isert_handle_scsi_cmd(isert_conn
, isert_cmd
,
1028 rx_desc
, (unsigned char *)hdr
);
1030 case ISCSI_OP_NOOP_OUT
:
1031 cmd
= iscsit_allocate_cmd(conn
, GFP_KERNEL
);
1035 ret
= iscsit_handle_nop_out(conn
, cmd
, (unsigned char *)hdr
);
1037 case ISCSI_OP_SCSI_DATA_OUT
:
1038 ret
= isert_handle_iscsi_dataout(isert_conn
, rx_desc
,
1039 (unsigned char *)hdr
);
1041 case ISCSI_OP_SCSI_TMFUNC
:
1042 cmd
= iscsit_allocate_cmd(conn
, GFP_KERNEL
);
1046 ret
= iscsit_handle_task_mgt_cmd(conn
, cmd
,
1047 (unsigned char *)hdr
);
1049 case ISCSI_OP_LOGOUT
:
1050 cmd
= iscsit_allocate_cmd(conn
, GFP_KERNEL
);
1054 ret
= iscsit_handle_logout_cmd(conn
, cmd
, (unsigned char *)hdr
);
1056 wait_for_completion_timeout(&conn
->conn_logout_comp
,
1057 SECONDS_FOR_LOGOUT_COMP
*
1061 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode
);
1070 isert_rx_do_work(struct iser_rx_desc
*rx_desc
, struct isert_conn
*isert_conn
)
1072 struct iser_hdr
*iser_hdr
= &rx_desc
->iser_header
;
1073 uint64_t read_va
= 0, write_va
= 0;
1074 uint32_t read_stag
= 0, write_stag
= 0;
1077 switch (iser_hdr
->flags
& 0xF0) {
1079 if (iser_hdr
->flags
& ISER_RSV
) {
1080 read_stag
= be32_to_cpu(iser_hdr
->read_stag
);
1081 read_va
= be64_to_cpu(iser_hdr
->read_va
);
1082 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1083 read_stag
, (unsigned long long)read_va
);
1085 if (iser_hdr
->flags
& ISER_WSV
) {
1086 write_stag
= be32_to_cpu(iser_hdr
->write_stag
);
1087 write_va
= be64_to_cpu(iser_hdr
->write_va
);
1088 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1089 write_stag
, (unsigned long long)write_va
);
1092 pr_debug("ISER ISCSI_CTRL PDU\n");
1095 pr_err("iSER Hello message\n");
1098 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr
->flags
);
1102 rc
= isert_rx_opcode(isert_conn
, rx_desc
,
1103 read_stag
, read_va
, write_stag
, write_va
);
1107 isert_rx_completion(struct iser_rx_desc
*desc
, struct isert_conn
*isert_conn
,
1108 unsigned long xfer_len
)
1110 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1111 struct iscsi_hdr
*hdr
;
1113 int rx_buflen
, outstanding
;
1115 if ((char *)desc
== isert_conn
->login_req_buf
) {
1116 rx_dma
= isert_conn
->login_req_dma
;
1117 rx_buflen
= ISER_RX_LOGIN_SIZE
;
1118 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1121 rx_dma
= desc
->dma_addr
;
1122 rx_buflen
= ISER_RX_PAYLOAD_SIZE
;
1123 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1127 ib_dma_sync_single_for_cpu(ib_dev
, rx_dma
, rx_buflen
, DMA_FROM_DEVICE
);
1129 hdr
= &desc
->iscsi_header
;
1130 pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1131 hdr
->opcode
, hdr
->itt
, hdr
->flags
,
1132 (int)(xfer_len
- ISER_HEADERS_LEN
));
1134 if ((char *)desc
== isert_conn
->login_req_buf
)
1135 isert_rx_login_req(desc
, xfer_len
- ISER_HEADERS_LEN
,
1138 isert_rx_do_work(desc
, isert_conn
);
1140 ib_dma_sync_single_for_device(ib_dev
, rx_dma
, rx_buflen
,
1143 isert_conn
->post_recv_buf_count
--;
1144 pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1145 isert_conn
->post_recv_buf_count
);
1147 if ((char *)desc
== isert_conn
->login_req_buf
)
1150 outstanding
= isert_conn
->post_recv_buf_count
;
1151 if (outstanding
+ ISERT_MIN_POSTED_RX
<= ISERT_QP_MAX_RECV_DTOS
) {
1152 int err
, count
= min(ISERT_QP_MAX_RECV_DTOS
- outstanding
,
1153 ISERT_MIN_POSTED_RX
);
1154 err
= isert_post_recv(isert_conn
, count
);
1156 pr_err("isert_post_recv() count: %d failed, %d\n",
1163 isert_unmap_cmd(struct isert_cmd
*isert_cmd
, struct isert_conn
*isert_conn
)
1165 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1166 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1168 pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
1171 ib_dma_unmap_sg(ib_dev
, wr
->sge
, wr
->num_sge
, DMA_TO_DEVICE
);
1178 kfree(isert_cmd
->ib_sge
);
1179 isert_cmd
->ib_sge
= NULL
;
1183 isert_put_cmd(struct isert_cmd
*isert_cmd
)
1185 struct iscsi_cmd
*cmd
= &isert_cmd
->iscsi_cmd
;
1186 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1187 struct iscsi_conn
*conn
;
1189 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd
);
1191 switch (cmd
->iscsi_opcode
) {
1192 case ISCSI_OP_SCSI_CMD
:
1193 conn
= isert_conn
->conn
;
1195 spin_lock_bh(&conn
->cmd_lock
);
1196 if (!list_empty(&cmd
->i_conn_node
))
1197 list_del(&cmd
->i_conn_node
);
1198 spin_unlock_bh(&conn
->cmd_lock
);
1200 if (cmd
->data_direction
== DMA_TO_DEVICE
)
1201 iscsit_stop_dataout_timer(cmd
);
1203 isert_unmap_cmd(isert_cmd
, isert_conn
);
1207 case ISCSI_OP_SCSI_TMFUNC
:
1208 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1210 case ISCSI_OP_REJECT
:
1211 case ISCSI_OP_NOOP_OUT
:
1212 conn
= isert_conn
->conn
;
1214 spin_lock_bh(&conn
->cmd_lock
);
1215 if (!list_empty(&cmd
->i_conn_node
))
1216 list_del(&cmd
->i_conn_node
);
1217 spin_unlock_bh(&conn
->cmd_lock
);
1220 * Handle special case for REJECT when iscsi_add_reject*() has
1221 * overwritten the original iscsi_opcode assignment, and the
1222 * associated cmd->se_cmd needs to be released.
1224 if (cmd
->se_cmd
.se_tfo
!= NULL
) {
1225 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1232 isert_release_cmd(cmd
);
1238 isert_unmap_tx_desc(struct iser_tx_desc
*tx_desc
, struct ib_device
*ib_dev
)
1240 if (tx_desc
->dma_addr
!= 0) {
1241 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1242 ib_dma_unmap_single(ib_dev
, tx_desc
->dma_addr
,
1243 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
1244 tx_desc
->dma_addr
= 0;
1249 isert_completion_put(struct iser_tx_desc
*tx_desc
, struct isert_cmd
*isert_cmd
,
1250 struct ib_device
*ib_dev
)
1252 if (isert_cmd
->sense_buf_dma
!= 0) {
1253 pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
1254 ib_dma_unmap_single(ib_dev
, isert_cmd
->sense_buf_dma
,
1255 isert_cmd
->sense_buf_len
, DMA_TO_DEVICE
);
1256 isert_cmd
->sense_buf_dma
= 0;
1259 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1260 isert_put_cmd(isert_cmd
);
1264 isert_completion_rdma_read(struct iser_tx_desc
*tx_desc
,
1265 struct isert_cmd
*isert_cmd
)
1267 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1268 struct iscsi_cmd
*cmd
= &isert_cmd
->iscsi_cmd
;
1269 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1270 struct ib_device
*ib_dev
= isert_cmd
->conn
->conn_cm_id
->device
;
1272 iscsit_stop_dataout_timer(cmd
);
1275 pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
1276 ib_dma_unmap_sg(ib_dev
, wr
->sge
, wr
->num_sge
, DMA_TO_DEVICE
);
1280 if (isert_cmd
->ib_sge
) {
1281 pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
1282 kfree(isert_cmd
->ib_sge
);
1283 isert_cmd
->ib_sge
= NULL
;
1286 cmd
->write_data_done
= se_cmd
->data_length
;
1288 pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
1289 spin_lock_bh(&cmd
->istate_lock
);
1290 cmd
->cmd_flags
|= ICF_GOT_LAST_DATAOUT
;
1291 cmd
->i_state
= ISTATE_RECEIVED_LAST_DATAOUT
;
1292 spin_unlock_bh(&cmd
->istate_lock
);
1294 target_execute_cmd(se_cmd
);
1298 isert_do_control_comp(struct work_struct
*work
)
1300 struct isert_cmd
*isert_cmd
= container_of(work
,
1301 struct isert_cmd
, comp_work
);
1302 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1303 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1304 struct iscsi_cmd
*cmd
= &isert_cmd
->iscsi_cmd
;
1306 switch (cmd
->i_state
) {
1307 case ISTATE_SEND_TASKMGTRSP
:
1308 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1310 atomic_dec(&isert_conn
->post_send_buf_count
);
1311 iscsit_tmr_post_handler(cmd
, cmd
->conn
);
1313 cmd
->i_state
= ISTATE_SENT_STATUS
;
1314 isert_completion_put(&isert_cmd
->tx_desc
, isert_cmd
, ib_dev
);
1316 case ISTATE_SEND_REJECT
:
1317 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1318 atomic_dec(&isert_conn
->post_send_buf_count
);
1320 cmd
->i_state
= ISTATE_SENT_STATUS
;
1321 complete(&cmd
->reject_comp
);
1322 isert_completion_put(&isert_cmd
->tx_desc
, isert_cmd
, ib_dev
);
1323 case ISTATE_SEND_LOGOUTRSP
:
1324 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1326 * Call atomic_dec(&isert_conn->post_send_buf_count)
1327 * from isert_free_conn()
1329 isert_conn
->logout_posted
= true;
1330 iscsit_logout_post_handler(cmd
, cmd
->conn
);
1333 pr_err("Unknown do_control_comp i_state %d\n", cmd
->i_state
);
1340 isert_response_completion(struct iser_tx_desc
*tx_desc
,
1341 struct isert_cmd
*isert_cmd
,
1342 struct isert_conn
*isert_conn
,
1343 struct ib_device
*ib_dev
)
1345 struct iscsi_cmd
*cmd
= &isert_cmd
->iscsi_cmd
;
1347 if (cmd
->i_state
== ISTATE_SEND_TASKMGTRSP
||
1348 cmd
->i_state
== ISTATE_SEND_LOGOUTRSP
) {
1349 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1351 INIT_WORK(&isert_cmd
->comp_work
, isert_do_control_comp
);
1352 queue_work(isert_comp_wq
, &isert_cmd
->comp_work
);
1355 atomic_dec(&isert_conn
->post_send_buf_count
);
1357 cmd
->i_state
= ISTATE_SENT_STATUS
;
1358 isert_completion_put(tx_desc
, isert_cmd
, ib_dev
);
1362 isert_send_completion(struct iser_tx_desc
*tx_desc
,
1363 struct isert_conn
*isert_conn
)
1365 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1366 struct isert_cmd
*isert_cmd
= tx_desc
->isert_cmd
;
1367 struct isert_rdma_wr
*wr
;
1370 atomic_dec(&isert_conn
->post_send_buf_count
);
1371 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1374 wr
= &isert_cmd
->rdma_wr
;
1376 switch (wr
->iser_ib_op
) {
1378 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1382 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1383 isert_response_completion(tx_desc
, isert_cmd
,
1384 isert_conn
, ib_dev
);
1386 case ISER_IB_RDMA_WRITE
:
1387 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1390 case ISER_IB_RDMA_READ
:
1391 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1393 atomic_dec(&isert_conn
->post_send_buf_count
);
1394 isert_completion_rdma_read(tx_desc
, isert_cmd
);
1397 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr
->iser_ib_op
);
1404 isert_cq_comp_err(struct iser_tx_desc
*tx_desc
, struct isert_conn
*isert_conn
)
1406 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1409 struct isert_cmd
*isert_cmd
= tx_desc
->isert_cmd
;
1412 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1414 isert_completion_put(tx_desc
, isert_cmd
, ib_dev
);
1417 if (isert_conn
->post_recv_buf_count
== 0 &&
1418 atomic_read(&isert_conn
->post_send_buf_count
) == 0) {
1419 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1420 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1422 isert_conn
->state
= ISER_CONN_TERMINATING
;
1423 wake_up(&isert_conn
->conn_wait_comp_err
);
1428 isert_cq_tx_work(struct work_struct
*work
)
1430 struct isert_cq_desc
*cq_desc
= container_of(work
,
1431 struct isert_cq_desc
, cq_tx_work
);
1432 struct isert_device
*device
= cq_desc
->device
;
1433 int cq_index
= cq_desc
->cq_index
;
1434 struct ib_cq
*tx_cq
= device
->dev_tx_cq
[cq_index
];
1435 struct isert_conn
*isert_conn
;
1436 struct iser_tx_desc
*tx_desc
;
1439 while (ib_poll_cq(tx_cq
, 1, &wc
) == 1) {
1440 tx_desc
= (struct iser_tx_desc
*)(unsigned long)wc
.wr_id
;
1441 isert_conn
= wc
.qp
->qp_context
;
1443 if (wc
.status
== IB_WC_SUCCESS
) {
1444 isert_send_completion(tx_desc
, isert_conn
);
1446 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1447 pr_debug("TX wc.status: 0x%08x\n", wc
.status
);
1448 atomic_dec(&isert_conn
->post_send_buf_count
);
1449 isert_cq_comp_err(tx_desc
, isert_conn
);
1453 ib_req_notify_cq(tx_cq
, IB_CQ_NEXT_COMP
);
1457 isert_cq_tx_callback(struct ib_cq
*cq
, void *context
)
1459 struct isert_cq_desc
*cq_desc
= (struct isert_cq_desc
*)context
;
1461 INIT_WORK(&cq_desc
->cq_tx_work
, isert_cq_tx_work
);
1462 queue_work(isert_comp_wq
, &cq_desc
->cq_tx_work
);
1466 isert_cq_rx_work(struct work_struct
*work
)
1468 struct isert_cq_desc
*cq_desc
= container_of(work
,
1469 struct isert_cq_desc
, cq_rx_work
);
1470 struct isert_device
*device
= cq_desc
->device
;
1471 int cq_index
= cq_desc
->cq_index
;
1472 struct ib_cq
*rx_cq
= device
->dev_rx_cq
[cq_index
];
1473 struct isert_conn
*isert_conn
;
1474 struct iser_rx_desc
*rx_desc
;
1476 unsigned long xfer_len
;
1478 while (ib_poll_cq(rx_cq
, 1, &wc
) == 1) {
1479 rx_desc
= (struct iser_rx_desc
*)(unsigned long)wc
.wr_id
;
1480 isert_conn
= wc
.qp
->qp_context
;
1482 if (wc
.status
== IB_WC_SUCCESS
) {
1483 xfer_len
= (unsigned long)wc
.byte_len
;
1484 isert_rx_completion(rx_desc
, isert_conn
, xfer_len
);
1486 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1487 if (wc
.status
!= IB_WC_WR_FLUSH_ERR
)
1488 pr_debug("RX wc.status: 0x%08x\n", wc
.status
);
1490 isert_conn
->post_recv_buf_count
--;
1491 isert_cq_comp_err(NULL
, isert_conn
);
1495 ib_req_notify_cq(rx_cq
, IB_CQ_NEXT_COMP
);
1499 isert_cq_rx_callback(struct ib_cq
*cq
, void *context
)
1501 struct isert_cq_desc
*cq_desc
= (struct isert_cq_desc
*)context
;
1503 INIT_WORK(&cq_desc
->cq_rx_work
, isert_cq_rx_work
);
1504 queue_work(isert_rx_wq
, &cq_desc
->cq_rx_work
);
1508 isert_post_response(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
)
1510 struct ib_send_wr
*wr_failed
;
1513 atomic_inc(&isert_conn
->post_send_buf_count
);
1515 ret
= ib_post_send(isert_conn
->conn_qp
, &isert_cmd
->tx_desc
.send_wr
,
1518 pr_err("ib_post_send failed with %d\n", ret
);
1519 atomic_dec(&isert_conn
->post_send_buf_count
);
1526 isert_put_response(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
)
1528 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1529 struct isert_cmd
, iscsi_cmd
);
1530 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1531 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1532 struct iscsi_scsi_rsp
*hdr
= (struct iscsi_scsi_rsp
*)
1533 &isert_cmd
->tx_desc
.iscsi_header
;
1535 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1536 iscsit_build_rsp_pdu(cmd
, conn
, true, hdr
);
1537 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1539 * Attach SENSE DATA payload to iSCSI Response PDU
1541 if (cmd
->se_cmd
.sense_buffer
&&
1542 ((cmd
->se_cmd
.se_cmd_flags
& SCF_TRANSPORT_TASK_SENSE
) ||
1543 (cmd
->se_cmd
.se_cmd_flags
& SCF_EMULATED_TASK_SENSE
))) {
1544 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1545 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
1546 u32 padding
, sense_len
;
1548 put_unaligned_be16(cmd
->se_cmd
.scsi_sense_length
,
1550 cmd
->se_cmd
.scsi_sense_length
+= sizeof(__be16
);
1552 padding
= -(cmd
->se_cmd
.scsi_sense_length
) & 3;
1553 hton24(hdr
->dlength
, (u32
)cmd
->se_cmd
.scsi_sense_length
);
1554 sense_len
= cmd
->se_cmd
.scsi_sense_length
+ padding
;
1556 isert_cmd
->sense_buf_dma
= ib_dma_map_single(ib_dev
,
1557 (void *)cmd
->sense_buffer
, sense_len
,
1560 isert_cmd
->sense_buf_len
= sense_len
;
1561 tx_dsg
->addr
= isert_cmd
->sense_buf_dma
;
1562 tx_dsg
->length
= sense_len
;
1563 tx_dsg
->lkey
= isert_conn
->conn_mr
->lkey
;
1564 isert_cmd
->tx_desc
.num_sge
= 2;
1567 isert_init_send_wr(isert_cmd
, send_wr
);
1569 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1571 return isert_post_response(isert_conn
, isert_cmd
);
1575 isert_put_nopin(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
,
1576 bool nopout_response
)
1578 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1579 struct isert_cmd
, iscsi_cmd
);
1580 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1581 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1583 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1584 iscsit_build_nopin_rsp(cmd
, conn
, (struct iscsi_nopin
*)
1585 &isert_cmd
->tx_desc
.iscsi_header
,
1587 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1588 isert_init_send_wr(isert_cmd
, send_wr
);
1590 pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1592 return isert_post_response(isert_conn
, isert_cmd
);
1596 isert_put_logout_rsp(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1598 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1599 struct isert_cmd
, iscsi_cmd
);
1600 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1601 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1603 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1604 iscsit_build_logout_rsp(cmd
, conn
, (struct iscsi_logout_rsp
*)
1605 &isert_cmd
->tx_desc
.iscsi_header
);
1606 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1607 isert_init_send_wr(isert_cmd
, send_wr
);
1609 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1611 return isert_post_response(isert_conn
, isert_cmd
);
1615 isert_put_tm_rsp(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1617 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1618 struct isert_cmd
, iscsi_cmd
);
1619 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1620 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1622 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1623 iscsit_build_task_mgt_rsp(cmd
, conn
, (struct iscsi_tm_rsp
*)
1624 &isert_cmd
->tx_desc
.iscsi_header
);
1625 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1626 isert_init_send_wr(isert_cmd
, send_wr
);
1628 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1630 return isert_post_response(isert_conn
, isert_cmd
);
1634 isert_put_reject(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1636 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1637 struct isert_cmd
, iscsi_cmd
);
1638 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1639 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1641 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1642 iscsit_build_reject(cmd
, conn
, (struct iscsi_reject
*)
1643 &isert_cmd
->tx_desc
.iscsi_header
);
1644 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1645 isert_init_send_wr(isert_cmd
, send_wr
);
1647 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1649 return isert_post_response(isert_conn
, isert_cmd
);
1653 isert_build_rdma_wr(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1654 struct ib_sge
*ib_sge
, struct ib_send_wr
*send_wr
,
1655 u32 data_left
, u32 offset
)
1657 struct iscsi_cmd
*cmd
= &isert_cmd
->iscsi_cmd
;
1658 struct scatterlist
*sg_start
, *tmp_sg
;
1659 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1660 u32 sg_off
, page_off
;
1661 int i
= 0, sg_nents
;
1663 sg_off
= offset
/ PAGE_SIZE
;
1664 sg_start
= &cmd
->se_cmd
.t_data_sg
[sg_off
];
1665 sg_nents
= min(cmd
->se_cmd
.t_data_nents
- sg_off
, isert_conn
->max_sge
);
1666 page_off
= offset
% PAGE_SIZE
;
1668 send_wr
->sg_list
= ib_sge
;
1669 send_wr
->num_sge
= sg_nents
;
1670 send_wr
->wr_id
= (unsigned long)&isert_cmd
->tx_desc
;
1672 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
1674 for_each_sg(sg_start
, tmp_sg
, sg_nents
, i
) {
1675 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
1676 (unsigned long long)tmp_sg
->dma_address
,
1677 tmp_sg
->length
, page_off
);
1679 ib_sge
->addr
= ib_sg_dma_address(ib_dev
, tmp_sg
) + page_off
;
1680 ib_sge
->length
= min_t(u32
, data_left
,
1681 ib_sg_dma_len(ib_dev
, tmp_sg
) - page_off
);
1682 ib_sge
->lkey
= isert_conn
->conn_mr
->lkey
;
1684 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
1685 ib_sge
->addr
, ib_sge
->length
);
1687 data_left
-= ib_sge
->length
;
1689 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge
);
1692 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
1693 send_wr
->sg_list
, send_wr
->num_sge
);
1699 isert_put_datain(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
)
1701 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1702 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1703 struct isert_cmd
, iscsi_cmd
);
1704 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1705 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1706 struct ib_send_wr
*wr_failed
, *send_wr
;
1707 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1708 struct ib_sge
*ib_sge
;
1709 struct scatterlist
*sg
;
1710 u32 offset
= 0, data_len
, data_left
, rdma_write_max
;
1711 int rc
, ret
= 0, count
, sg_nents
, i
, ib_sge_cnt
;
1713 pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd
->data_length
);
1715 sg
= &se_cmd
->t_data_sg
[0];
1716 sg_nents
= se_cmd
->t_data_nents
;
1718 count
= ib_dma_map_sg(ib_dev
, sg
, sg_nents
, DMA_TO_DEVICE
);
1719 if (unlikely(!count
)) {
1720 pr_err("Unable to map put_datain SGs\n");
1724 wr
->num_sge
= sg_nents
;
1725 pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
1726 count
, sg
, sg_nents
);
1728 ib_sge
= kzalloc(sizeof(struct ib_sge
) * sg_nents
, GFP_KERNEL
);
1730 pr_warn("Unable to allocate datain ib_sge\n");
1734 isert_cmd
->ib_sge
= ib_sge
;
1736 pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
1737 ib_sge
, se_cmd
->t_data_nents
);
1739 wr
->send_wr_num
= DIV_ROUND_UP(sg_nents
, isert_conn
->max_sge
);
1740 wr
->send_wr
= kzalloc(sizeof(struct ib_send_wr
) * wr
->send_wr_num
,
1743 pr_err("Unable to allocate wr->send_wr\n");
1747 pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1748 wr
->send_wr
, wr
->send_wr_num
);
1750 iscsit_increment_maxcmdsn(cmd
, conn
->sess
);
1751 cmd
->stat_sn
= conn
->stat_sn
++;
1753 wr
->isert_cmd
= isert_cmd
;
1754 rdma_write_max
= isert_conn
->max_sge
* PAGE_SIZE
;
1755 data_left
= se_cmd
->data_length
;
1757 for (i
= 0; i
< wr
->send_wr_num
; i
++) {
1758 send_wr
= &isert_cmd
->rdma_wr
.send_wr
[i
];
1759 data_len
= min(data_left
, rdma_write_max
);
1761 send_wr
->opcode
= IB_WR_RDMA_WRITE
;
1762 send_wr
->send_flags
= 0;
1763 send_wr
->wr
.rdma
.remote_addr
= isert_cmd
->read_va
+ offset
;
1764 send_wr
->wr
.rdma
.rkey
= isert_cmd
->read_stag
;
1766 ib_sge_cnt
= isert_build_rdma_wr(isert_conn
, isert_cmd
, ib_sge
,
1767 send_wr
, data_len
, offset
);
1768 ib_sge
+= ib_sge_cnt
;
1770 if (i
+ 1 == wr
->send_wr_num
)
1771 send_wr
->next
= &isert_cmd
->tx_desc
.send_wr
;
1773 send_wr
->next
= &wr
->send_wr
[i
+ 1];
1776 data_left
-= data_len
;
1779 * Build isert_conn->tx_desc for iSCSI response PDU and attach
1781 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1782 iscsit_build_rsp_pdu(cmd
, conn
, false, (struct iscsi_scsi_rsp
*)
1783 &isert_cmd
->tx_desc
.iscsi_header
);
1784 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1785 isert_init_send_wr(isert_cmd
, &isert_cmd
->tx_desc
.send_wr
);
1787 atomic_inc(&isert_conn
->post_send_buf_count
);
1789 rc
= ib_post_send(isert_conn
->conn_qp
, wr
->send_wr
, &wr_failed
);
1791 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
1792 atomic_dec(&isert_conn
->post_send_buf_count
);
1794 pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
1798 ib_dma_unmap_sg(ib_dev
, sg
, sg_nents
, DMA_TO_DEVICE
);
1803 isert_get_dataout(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
, bool recovery
)
1805 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1806 struct isert_cmd
*isert_cmd
= container_of(cmd
,
1807 struct isert_cmd
, iscsi_cmd
);
1808 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1809 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1810 struct ib_send_wr
*wr_failed
, *send_wr
;
1811 struct ib_sge
*ib_sge
;
1812 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1813 struct scatterlist
*sg_start
;
1814 u32 sg_off
, sg_nents
, page_off
, va_offset
= 0;
1815 u32 offset
= 0, data_len
, data_left
, rdma_write_max
;
1816 int rc
, ret
= 0, count
, i
, ib_sge_cnt
;
1818 pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
1819 se_cmd
->data_length
, cmd
->write_data_done
);
1821 sg_off
= cmd
->write_data_done
/ PAGE_SIZE
;
1822 sg_start
= &cmd
->se_cmd
.t_data_sg
[sg_off
];
1823 page_off
= cmd
->write_data_done
% PAGE_SIZE
;
1825 pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
1826 sg_off
, sg_start
, page_off
);
1828 data_left
= se_cmd
->data_length
- cmd
->write_data_done
;
1829 sg_nents
= se_cmd
->t_data_nents
- sg_off
;
1831 pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
1832 data_left
, sg_nents
);
1834 count
= ib_dma_map_sg(ib_dev
, sg_start
, sg_nents
, DMA_FROM_DEVICE
);
1835 if (unlikely(!count
)) {
1836 pr_err("Unable to map get_dataout SGs\n");
1840 wr
->num_sge
= sg_nents
;
1841 pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
1842 count
, sg_start
, sg_nents
);
1844 ib_sge
= kzalloc(sizeof(struct ib_sge
) * sg_nents
, GFP_KERNEL
);
1846 pr_warn("Unable to allocate dataout ib_sge\n");
1850 isert_cmd
->ib_sge
= ib_sge
;
1852 pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
1855 wr
->send_wr_num
= DIV_ROUND_UP(sg_nents
, isert_conn
->max_sge
);
1856 wr
->send_wr
= kzalloc(sizeof(struct ib_send_wr
) * wr
->send_wr_num
,
1859 pr_debug("Unable to allocate wr->send_wr\n");
1863 pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1864 wr
->send_wr
, wr
->send_wr_num
);
1866 isert_cmd
->tx_desc
.isert_cmd
= isert_cmd
;
1868 wr
->iser_ib_op
= ISER_IB_RDMA_READ
;
1869 wr
->isert_cmd
= isert_cmd
;
1870 rdma_write_max
= isert_conn
->max_sge
* PAGE_SIZE
;
1871 offset
= cmd
->write_data_done
;
1873 for (i
= 0; i
< wr
->send_wr_num
; i
++) {
1874 send_wr
= &isert_cmd
->rdma_wr
.send_wr
[i
];
1875 data_len
= min(data_left
, rdma_write_max
);
1877 send_wr
->opcode
= IB_WR_RDMA_READ
;
1878 send_wr
->wr
.rdma
.remote_addr
= isert_cmd
->write_va
+ va_offset
;
1879 send_wr
->wr
.rdma
.rkey
= isert_cmd
->write_stag
;
1881 ib_sge_cnt
= isert_build_rdma_wr(isert_conn
, isert_cmd
, ib_sge
,
1882 send_wr
, data_len
, offset
);
1883 ib_sge
+= ib_sge_cnt
;
1885 if (i
+ 1 == wr
->send_wr_num
)
1886 send_wr
->send_flags
= IB_SEND_SIGNALED
;
1888 send_wr
->next
= &wr
->send_wr
[i
+ 1];
1891 va_offset
+= data_len
;
1892 data_left
-= data_len
;
1895 atomic_inc(&isert_conn
->post_send_buf_count
);
1897 rc
= ib_post_send(isert_conn
->conn_qp
, wr
->send_wr
, &wr_failed
);
1899 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
1900 atomic_dec(&isert_conn
->post_send_buf_count
);
1902 pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
1906 ib_dma_unmap_sg(ib_dev
, sg_start
, sg_nents
, DMA_FROM_DEVICE
);
1911 isert_immediate_queue(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
, int state
)
1916 case ISTATE_SEND_NOPIN_WANT_RESPONSE
:
1917 ret
= isert_put_nopin(cmd
, conn
, false);
1920 pr_err("Unknown immediate state: 0x%02x\n", state
);
1929 isert_response_queue(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
, int state
)
1934 case ISTATE_SEND_LOGOUTRSP
:
1935 ret
= isert_put_logout_rsp(cmd
, conn
);
1937 pr_debug("Returning iSER Logout -EAGAIN\n");
1941 case ISTATE_SEND_NOPIN
:
1942 ret
= isert_put_nopin(cmd
, conn
, true);
1944 case ISTATE_SEND_TASKMGTRSP
:
1945 ret
= isert_put_tm_rsp(cmd
, conn
);
1947 case ISTATE_SEND_REJECT
:
1948 ret
= isert_put_reject(cmd
, conn
);
1950 case ISTATE_SEND_STATUS
:
1952 * Special case for sending non GOOD SCSI status from TX thread
1953 * context during pre se_cmd excecution failure.
1955 ret
= isert_put_response(conn
, cmd
);
1958 pr_err("Unknown response state: 0x%02x\n", state
);
1967 isert_setup_np(struct iscsi_np
*np
,
1968 struct __kernel_sockaddr_storage
*ksockaddr
)
1970 struct isert_np
*isert_np
;
1971 struct rdma_cm_id
*isert_lid
;
1972 struct sockaddr
*sa
;
1975 isert_np
= kzalloc(sizeof(struct isert_np
), GFP_KERNEL
);
1977 pr_err("Unable to allocate struct isert_np\n");
1980 init_waitqueue_head(&isert_np
->np_accept_wq
);
1981 mutex_init(&isert_np
->np_accept_mutex
);
1982 INIT_LIST_HEAD(&isert_np
->np_accept_list
);
1983 init_completion(&isert_np
->np_login_comp
);
1985 sa
= (struct sockaddr
*)ksockaddr
;
1986 pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr
, sa
);
1988 * Setup the np->np_sockaddr from the passed sockaddr setup
1989 * in iscsi_target_configfs.c code..
1991 memcpy(&np
->np_sockaddr
, ksockaddr
,
1992 sizeof(struct __kernel_sockaddr_storage
));
1994 isert_lid
= rdma_create_id(isert_cma_handler
, np
, RDMA_PS_TCP
,
1996 if (IS_ERR(isert_lid
)) {
1997 pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
1998 PTR_ERR(isert_lid
));
1999 ret
= PTR_ERR(isert_lid
);
2003 ret
= rdma_bind_addr(isert_lid
, sa
);
2005 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret
);
2009 ret
= rdma_listen(isert_lid
, ISERT_RDMA_LISTEN_BACKLOG
);
2011 pr_err("rdma_listen() for isert_lid failed: %d\n", ret
);
2015 isert_np
->np_cm_id
= isert_lid
;
2016 np
->np_context
= isert_np
;
2017 pr_debug("Setup isert_lid->context: %p\n", isert_lid
->context
);
2022 rdma_destroy_id(isert_lid
);
2029 isert_check_accept_queue(struct isert_np
*isert_np
)
2033 mutex_lock(&isert_np
->np_accept_mutex
);
2034 empty
= list_empty(&isert_np
->np_accept_list
);
2035 mutex_unlock(&isert_np
->np_accept_mutex
);
2041 isert_rdma_accept(struct isert_conn
*isert_conn
)
2043 struct rdma_cm_id
*cm_id
= isert_conn
->conn_cm_id
;
2044 struct rdma_conn_param cp
;
2047 memset(&cp
, 0, sizeof(struct rdma_conn_param
));
2048 cp
.responder_resources
= isert_conn
->responder_resources
;
2049 cp
.initiator_depth
= isert_conn
->initiator_depth
;
2051 cp
.rnr_retry_count
= 7;
2053 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2055 ret
= rdma_accept(cm_id
, &cp
);
2057 pr_err("rdma_accept() failed with: %d\n", ret
);
2061 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2067 isert_get_login_rx(struct iscsi_conn
*conn
, struct iscsi_login
*login
)
2069 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
2072 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn
);
2074 ret
= wait_for_completion_interruptible(&isert_conn
->conn_login_comp
);
2078 pr_debug("isert_get_login_rx processing login->req: %p\n", login
->req
);
2083 isert_set_conn_info(struct iscsi_np
*np
, struct iscsi_conn
*conn
,
2084 struct isert_conn
*isert_conn
)
2086 struct rdma_cm_id
*cm_id
= isert_conn
->conn_cm_id
;
2087 struct rdma_route
*cm_route
= &cm_id
->route
;
2088 struct sockaddr_in
*sock_in
;
2089 struct sockaddr_in6
*sock_in6
;
2091 conn
->login_family
= np
->np_sockaddr
.ss_family
;
2093 if (np
->np_sockaddr
.ss_family
== AF_INET6
) {
2094 sock_in6
= (struct sockaddr_in6
*)&cm_route
->addr
.dst_addr
;
2095 snprintf(conn
->login_ip
, sizeof(conn
->login_ip
), "%pI6c",
2096 &sock_in6
->sin6_addr
.in6_u
);
2097 conn
->login_port
= ntohs(sock_in6
->sin6_port
);
2099 sock_in6
= (struct sockaddr_in6
*)&cm_route
->addr
.src_addr
;
2100 snprintf(conn
->local_ip
, sizeof(conn
->local_ip
), "%pI6c",
2101 &sock_in6
->sin6_addr
.in6_u
);
2102 conn
->local_port
= ntohs(sock_in6
->sin6_port
);
2104 sock_in
= (struct sockaddr_in
*)&cm_route
->addr
.dst_addr
;
2105 sprintf(conn
->login_ip
, "%pI4",
2106 &sock_in
->sin_addr
.s_addr
);
2107 conn
->login_port
= ntohs(sock_in
->sin_port
);
2109 sock_in
= (struct sockaddr_in
*)&cm_route
->addr
.src_addr
;
2110 sprintf(conn
->local_ip
, "%pI4",
2111 &sock_in
->sin_addr
.s_addr
);
2112 conn
->local_port
= ntohs(sock_in
->sin_port
);
2117 isert_accept_np(struct iscsi_np
*np
, struct iscsi_conn
*conn
)
2119 struct isert_np
*isert_np
= (struct isert_np
*)np
->np_context
;
2120 struct isert_conn
*isert_conn
;
2121 int max_accept
= 0, ret
;
2124 ret
= wait_event_interruptible(isert_np
->np_accept_wq
,
2125 !isert_check_accept_queue(isert_np
) ||
2126 np
->np_thread_state
== ISCSI_NP_THREAD_RESET
);
2130 spin_lock_bh(&np
->np_thread_lock
);
2131 if (np
->np_thread_state
== ISCSI_NP_THREAD_RESET
) {
2132 spin_unlock_bh(&np
->np_thread_lock
);
2133 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2136 spin_unlock_bh(&np
->np_thread_lock
);
2138 mutex_lock(&isert_np
->np_accept_mutex
);
2139 if (list_empty(&isert_np
->np_accept_list
)) {
2140 mutex_unlock(&isert_np
->np_accept_mutex
);
2144 isert_conn
= list_first_entry(&isert_np
->np_accept_list
,
2145 struct isert_conn
, conn_accept_node
);
2146 list_del_init(&isert_conn
->conn_accept_node
);
2147 mutex_unlock(&isert_np
->np_accept_mutex
);
2149 conn
->context
= isert_conn
;
2150 isert_conn
->conn
= conn
;
2153 ret
= isert_rdma_post_recvl(isert_conn
);
2157 ret
= isert_rdma_accept(isert_conn
);
2161 isert_set_conn_info(np
, conn
, isert_conn
);
2163 pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn
);
2168 isert_free_np(struct iscsi_np
*np
)
2170 struct isert_np
*isert_np
= (struct isert_np
*)np
->np_context
;
2172 rdma_destroy_id(isert_np
->np_cm_id
);
2174 np
->np_context
= NULL
;
2178 static void isert_free_conn(struct iscsi_conn
*conn
)
2180 struct isert_conn
*isert_conn
= conn
->context
;
2182 pr_debug("isert_free_conn: Starting \n");
2184 * Decrement post_send_buf_count for special case when called
2185 * from isert_do_control_comp() -> iscsit_logout_post_handler()
2187 if (isert_conn
->logout_posted
)
2188 atomic_dec(&isert_conn
->post_send_buf_count
);
2190 if (isert_conn
->conn_cm_id
)
2191 rdma_disconnect(isert_conn
->conn_cm_id
);
2193 * Only wait for conn_wait_comp_err if the isert_conn made it
2194 * into full feature phase..
2196 if (isert_conn
->state
> ISER_CONN_INIT
) {
2197 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2199 wait_event(isert_conn
->conn_wait_comp_err
,
2200 isert_conn
->state
== ISER_CONN_TERMINATING
);
2201 pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
2204 pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn
->state
);
2205 wait_event(isert_conn
->conn_wait
, isert_conn
->state
== ISER_CONN_DOWN
);
2206 pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
2208 isert_put_conn(isert_conn
);
2211 static struct iscsit_transport iser_target_transport
= {
2213 .transport_type
= ISCSI_INFINIBAND
,
2214 .owner
= THIS_MODULE
,
2215 .iscsit_setup_np
= isert_setup_np
,
2216 .iscsit_accept_np
= isert_accept_np
,
2217 .iscsit_free_np
= isert_free_np
,
2218 .iscsit_free_conn
= isert_free_conn
,
2219 .iscsit_alloc_cmd
= isert_alloc_cmd
,
2220 .iscsit_get_login_rx
= isert_get_login_rx
,
2221 .iscsit_put_login_tx
= isert_put_login_tx
,
2222 .iscsit_immediate_queue
= isert_immediate_queue
,
2223 .iscsit_response_queue
= isert_response_queue
,
2224 .iscsit_get_dataout
= isert_get_dataout
,
2225 .iscsit_queue_data_in
= isert_put_datain
,
2226 .iscsit_queue_status
= isert_put_response
,
2229 static int __init
isert_init(void)
2233 isert_rx_wq
= alloc_workqueue("isert_rx_wq", 0, 0);
2235 pr_err("Unable to allocate isert_rx_wq\n");
2239 isert_comp_wq
= alloc_workqueue("isert_comp_wq", 0, 0);
2240 if (!isert_comp_wq
) {
2241 pr_err("Unable to allocate isert_comp_wq\n");
2246 isert_cmd_cache
= kmem_cache_create("isert_cmd_cache",
2247 sizeof(struct isert_cmd
), __alignof__(struct isert_cmd
),
2249 if (!isert_cmd_cache
) {
2250 pr_err("Unable to create isert_cmd_cache\n");
2255 iscsit_register_transport(&iser_target_transport
);
2256 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2260 destroy_workqueue(isert_comp_wq
);
2262 destroy_workqueue(isert_rx_wq
);
2266 static void __exit
isert_exit(void)
2268 kmem_cache_destroy(isert_cmd_cache
);
2269 destroy_workqueue(isert_comp_wq
);
2270 destroy_workqueue(isert_rx_wq
);
2271 iscsit_unregister_transport(&iser_target_transport
);
2272 pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2275 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2276 MODULE_VERSION("0.1");
2277 MODULE_AUTHOR("nab@Linux-iSCSI.org");
2278 MODULE_LICENSE("GPL");
2280 module_init(isert_init
);
2281 module_exit(isert_exit
);