/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

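/*
 * Create the RC queue pair for a new connection. To spread completion
 * work across vectors, pick the TX/RX CQ pair that currently has the
 * fewest active QPs (under device_list_mutex), and undo the accounting
 * if QP creation fails.
 */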
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
err:
	mutex_lock(&device_list_mutex);
	device->cq_active_qps[min_index]--;
	mutex_unlock(&device_list_mutex);

	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

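/*
 * Allocate the fixed ring of ISERT_QP_MAX_RECV_DTOS receive descriptors
 * and DMA-map each one for DMA_FROM_DEVICE, unwinding the mappings done
 * so far if any single mapping fails.
 */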
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

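/*
 * Create the per-HCA resources shared by all connections on a device:
 * query the device attributes, pick fast registration vs. plain DMA MR
 * handlers based on the capability flags, and create one RX/TX
 * completion queue pair (with associated work items) per completion
 * vector, capped by the number of online CPUs and ISERT_MAX_CQ.
 */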
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;
	int max_rx_cqe, max_tx_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors, "
		 "fast registration %d pi_capable %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg,
		 device->pi_capable);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				  device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						    isert_cq_rx_callback,
						    isert_cq_event_callback,
						    (void *)&cq_desc[i],
						    max_rx_cqe, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						    isert_cq_tx_callback,
						    isert_cq_event_callback,
						    (void *)&cq_desc[i],
						    max_tx_cqe, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

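/*
 * Drain the per-connection pool of fast registration descriptors,
 * releasing each descriptor's page list, MR and, when present, its
 * protection information context.
 */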
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	pr_debug("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}

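/*
 * Allocate the protection information (T10-PI) context for a fast
 * registration descriptor: a page list and MR for the protection data
 * plus a signature-enabled MR for the handover itself.
 */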
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct ib_mr_init_attr mr_init_attr;
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		pr_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		pr_err("Failed to allocate prot frpl err=%ld\n",
		       PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		pr_err("Failed to allocate prot frmr err=%ld\n",
		       PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	memset(&mr_init_attr, 0, sizeof(mr_init_attr));
	mr_init_attr.max_reg_descriptors = 2;
	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
		pr_err("Failed to allocate signature enabled mr err=%ld\n",
		       PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(desc->pi_ctx);

	return ret;
}

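/*
 * Allocate the data page list and fast registration MR backing a single
 * pool descriptor, marking its data key valid on success.
 */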
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		pr_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		pr_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	pr_debug("Created fr_desc %p\n", fr_desc);

	return 0;

err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}

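/*
 * Populate the per-connection fast registration descriptor pool, sized
 * from the session tag count so every outstanding command can own a
 * descriptor.
 */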
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

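/*
 * rdma_cm connect request handler: reject the request while the parent
 * iscsi_np is not enabled, otherwise allocate an isert_conn and set up
 * its login buffers, device resources, PD, DMA MR and QP, post the
 * first login receive, accept the connection and wake up the np
 * listener thread.
 */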
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	isert_conn->conn_cm_id = cma_id;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		mutex_lock(&device_list_mutex);
		isert_conn->conn_device->cq_active_qps[cq_index]--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	pr_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
		pr_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->conn_mutex);
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		/*
		 * No flush completions will occur as we didn't
		 * get to ISER_CONN_FULL_FEATURE yet, complete
		 * to allow teardown progress.
		 */
		complete(&isert_conn->conn_wait_comp_err);
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		pr_info("Terminating conn %p state %d\n",
			isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			pr_warn("Failed rdma_disconnect isert_conn %p\n",
				isert_conn);
		break;
	default:
		pr_warn("conn %p terminating in state %d\n",
			isert_conn, isert_conn->state);
	}
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	pr_debug("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			pr_err("isert np %p setup id failed: %ld\n",
			       isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		pr_err("isert np %p Unexpected event %d\n",
		       isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}

static void
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_put_conn(isert_conn);
}

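/*
 * Central rdma_cm event dispatcher, covering both the listener (np) CM
 * ID and per-connection CM IDs.
 */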
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		isert_connect_error(cma_id);
		break;
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

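/*
 * Post @count receive work requests taken from the circular RX
 * descriptor ring, chained into a single ib_post_recv() call.
 */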
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

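/*
 * Build the SEND work request for a response PDU, batching unsignaled
 * descriptors onto conn_comp_llist until the coalescing threshold
 * described below is reached.
 */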
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr, bool coalesce)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	/*
	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
		tx_desc->llnode_active = true;
		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn->conn_comp_batch = 0;
	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
	mutex_unlock(&isert_conn->conn_mutex);

	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}

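/*
 * Transmit a login response PDU. On the final login response the
 * connection moves to full-feature phase: the fastreg pool is created
 * for normal sessions, the RX descriptor ring is allocated, and the
 * initial batch of receives is posted.
 */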
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					pr_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	pr_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

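/*
 * Dispatch a received PDU by iSCSI opcode. Discovery sessions only
 * accept TEXT and LOGOUT; anything else is logged and ignored.
 */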
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	    (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

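/*
 * Receive completion handler: distinguish the dedicated login buffer
 * from ring descriptors, hand the PDU off for processing, and replenish
 * posted receives once the outstanding count drops low enough.
 */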
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

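/*
 * DMA-map the command's scatterlist window for the requested RDMA
 * direction, clamping the mapping to ISCSI_ISER_SG_TABLESIZE entries.
 */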
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
			    ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
			  PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		pr_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}

static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);

	if (wr->data.sg) {
		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	LIST_HEAD(unmap_list);

	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}

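/*
 * Final per-command teardown, dispatched on the original iSCSI opcode.
 * WRITEs aborted by a completion error while still in WRITE_PENDING
 * need an extra target_put_sess_cmd() before the command can be freed.
 */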
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		pr_err("isert: PI error found type %d at sector 0x%llx "
		       "expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       (unsigned long long)se_cmd->bad_sector,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

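/*
 * Note on the sector math above: with T10-PI every protected block on
 * the wire is the logical block plus its 8-byte DIF tuple, hence
 * block_size + 8. Worked example (illustrative numbers, not from the
 * code): on a 512-byte formatted device, sig_err_offset = 2600 gives
 * 2600 / (512 + 8) = 5, i.e. the fifth sector past t_task_lba.
 */
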
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	/*
	 * If send_wr_num is 0, the RDMA completion already arrived and
	 * cleared it, so simply decrement for the response post.
	 * Otherwise the response is accounted for within send_wr_num,
	 * so subtract the whole count.
	 */
	if (wr->send_wr_num)
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	else
		atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
__isert_send_completion(struct iser_tx_desc *tx_desc,
			struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct iser_tx_desc *t;
	/*
	 * Drain coalesced completion llist starting from comp_llnode_batch
	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
	 */
	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		__isert_send_completion(t, isert_conn);
	}
	__isert_send_completion(tx_desc, isert_conn);
}

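/*
 * Editorial note: the coalescing drained above trades completion
 * signaling for latency. Only every Nth SEND is posted signaled, and
 * its comp_llnode_batch (built in isert_init_send_wr(), as the comment
 * above says) carries the unsignaled predecessors, so a single CQE
 * retires the whole batch through __isert_send_completion().
 */
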
static void
isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
{
	struct llist_node *llnode;
	struct isert_rdma_wr *wr;
	struct iser_tx_desc *t;

	mutex_lock(&isert_conn->conn_mutex);
	llnode = llist_del_all(&isert_conn->conn_comp_llist);
	isert_conn->conn_comp_batch = 0;
	mutex_unlock(&isert_conn->conn_mutex);

	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		wr = &t->isert_cmd->rdma_wr;

		/*
		 * If send_wr_num is 0, the RDMA completion already arrived
		 * and cleared it, so simply decrement for the response post.
		 * Otherwise the response is accounted for within send_wr_num,
		 * so subtract the whole count.
		 */
		if (wr->send_wr_num)
			atomic_sub(wr->send_wr_num,
				   &isert_conn->post_send_buf_count);
		else
			atomic_dec(&isert_conn->post_send_buf_count);

		isert_completion_put(t, t->isert_cmd, ib_dev, true);
	}
}

static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct isert_rdma_wr *wr;
	struct iser_tx_desc *t;

	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		wr = &t->isert_cmd->rdma_wr;

		/* Same send_wr_num accounting as isert_cq_drain_comp_llist() */
		if (wr->send_wr_num)
			atomic_sub(wr->send_wr_num,
				   &isert_conn->post_send_buf_count);
		else
			atomic_dec(&isert_conn->post_send_buf_count);

		isert_completion_put(t, t->isert_cmd, ib_dev, true);
	}
	tx_desc->comp_llnode_batch = NULL;

	if (!isert_cmd)
		isert_unmap_tx_desc(tx_desc, ib_dev);
	else
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}

static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_conn *conn = isert_conn->conn;

	if (isert_conn->post_recv_buf_count)
		return;

	isert_cq_drain_comp_llist(isert_conn, ib_dev);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

	complete(&isert_conn->conn_wait_comp_err);
}

static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);

			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				if (tx_desc->llnode_active)
					continue;

				atomic_dec(&isert_conn->post_send_buf_count);
				isert_cq_tx_comp_err(tx_desc, isert_conn);
			}
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}

static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

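/*
 * Note on the padding arithmetic above: the PDU data segment is the
 * 2-byte SenseLength header followed by the sense bytes, padded to a
 * 4-byte boundary, and "-(len) & 3" computes that pad. Worked example
 * (illustrative numbers): a 17-byte sense payload becomes 19 bytes
 * with the length header, -(19) & 3 = 1 pad byte, so pdu_len = 20.
 */
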
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			pr_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	pr_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Map each TCM scatterlist entry to an ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}

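/*
 * Sizing sketch for the loop above (illustrative numbers, not from
 * the code): with max_sge = 32 and 4 KiB pages, a 1 MiB transfer
 * built from 256 page-sized SG entries needs DIV_ROUND_UP(256, 32) =
 * 8 work requests, each moving up to rdma_write_max = 128 KiB. For
 * RDMA_WRITE the final WR chains to the response tx_desc so status
 * follows the data; for RDMA_READ only the final WR is signaled.
 */
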
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			 i, (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
				 n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}

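/*
 * Chunking note for the loop above: an entry whose end is not
 * page-aligned and which is not the last entry is assumed to be
 * continued by the next SG entry, so the chunk stays open; the page
 * list slots are only emitted once the chunk closes. Illustrative
 * example: a closed chunk covering [0x12800, 0x14000) emits pages
 * 0x12000 and 0x13000, i.e. every page the chunk touches.
 */
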
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;
	u8 key;

	if (mem->dma_nents == 1) {
		sge->lkey = isert_conn->conn_mr->lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
			 __func__, __LINE__, sge->addr, sge->length,
			 sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
		 fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
		 __func__, __LINE__, sge->addr, sge->length,
		 sge->lkey);

	return ret;
}

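/*
 * Note on the "bump the key" step above: the low byte of an R_Key is
 * the consumer-owned key portion, so incrementing it via
 * ib_update_fast_reg_key() before reuse makes any stale reference to
 * the previous registration fail its key check. Chaining LOCAL_INV
 * ahead of FAST_REG_MR in a single ib_post_send() keeps the
 * invalidate and the re-registration ordered on the send queue.
 */
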
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * These are hard-coded for now; if the target core ever exposes
	 * them, take the values from se_cmd instead.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}

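/*
 * Note on the masks above: 0xc0, 0x30 and 0x0f select the guard-tag,
 * app-tag and ref-tag bit ranges of the signature check_mask. The
 * app-tag bits are gated on TARGET_DIF_CHECK_REFTAG as well,
 * seemingly because the target core exposes no separate app-tag check
 * flag; that reading is an assumption, and the code is kept exactly
 * as written.
 */
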
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		 rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}

static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->conn_device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  isert_conn->conn_pd);
		if (ret) {
			pr_err("conn %p failed to allocate pi_ctx\n",
			       isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			pr_err("conn %p failed to map protection buffer\n",
			       isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			pr_err("conn %p failed to fast reg mr\n",
			       isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		pr_err("conn %p failed to fast reg mr\n",
		       isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}

static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}

static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr, false);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}

	if (!isert_prot_cmd(isert_conn, se_cmd))
		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			 "READ\n", isert_cmd);
	else
		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			 isert_cmd);

	return 1;
}

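/*
 * Editorial note: for non-PI commands the SCSI response SEND is
 * chained behind the RDMA_WRITE work request(s), so data and status
 * go out in a single ib_post_send() and retire on one completion
 * path. With PI enabled the response is deliberately withheld here
 * and sent from isert_completion_rdma_write() only after the
 * signature MR status has been checked.
 */
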
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	pr_debug("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		pr_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

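/*
 * Editorial note: the bind + listen pair above mirrors socket setup
 * on the RDMA CM. rdma_bind_addr() claims the portal address and
 * rdma_listen() arms isert_cma_handler() for connect-request events,
 * which ultimately queue new isert_conn entries onto
 * isert_np->np_accept_list for isert_accept_np() below.
 */
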
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		pr_err("isert_conn %p interrupted before got login req\n",
		       isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	pr_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret || max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

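/*
 * Editorial note: np_sem appears to be kicked once per incoming
 * connect event from the CM handler, so each down_interruptible()
 * above normally pairs with one np_accept_list entry; when the list
 * turns out to be empty, the max_accept counter bails out after a few
 * empty passes rather than looping forever.
 */
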
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that we don't have hanging connections that completed
	 * RDMA establishment but didn't start the iscsi login
	 * process. So work around this by cleaning up whatever
	 * piled up in np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		pr_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 conn_accept_node) {
			pr_info("cleaning isert_conn %p state (%d)\n",
				isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

b02efbfc
SG
3401static void isert_release_work(struct work_struct *work)
3402{
3403 struct isert_conn *isert_conn = container_of(work,
3404 struct isert_conn,
3405 release_work);
3406
3407 pr_info("Starting release conn %p\n", isert_conn);
3408
3409 wait_for_completion(&isert_conn->conn_wait);
3410
3411 mutex_lock(&isert_conn->conn_mutex);
3412 isert_conn->state = ISER_CONN_DOWN;
3413 mutex_unlock(&isert_conn->conn_mutex);
3414
3415 pr_info("Destroying conn %p\n", isert_conn);
3416 isert_put_conn(isert_conn);
3417}
3418
defd8848 3419static void isert_wait_conn(struct iscsi_conn *conn)
b8d26b3b
NB
3420{
3421 struct isert_conn *isert_conn = conn->context;
3422
defd8848 3423 pr_debug("isert_wait_conn: Starting\n");
b8d26b3b 3424
9d49f5e2 3425 mutex_lock(&isert_conn->conn_mutex);
b8d26b3b
NB
3426 /*
3427 * Only wait for conn_wait_comp_err if the isert_conn made it
3428 * into full feature phase.
3429 */
b2cb9649
NB
3430 if (isert_conn->state == ISER_CONN_INIT) {
3431 mutex_unlock(&isert_conn->conn_mutex);
b2cb9649 3432 return;
b8d26b3b 3433 }
954f2372 3434 isert_conn_terminate(isert_conn);
b2cb9649 3435 mutex_unlock(&isert_conn->conn_mutex);
b8d26b3b 3436
defd8848 3437 wait_for_completion(&isert_conn->conn_wait_comp_err);
954f2372 3438
b02efbfc
SG
3439 INIT_WORK(&isert_conn->release_work, isert_release_work);
3440 queue_work(isert_release_wq, &isert_conn->release_work);
defd8848
NB
3441}
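
isert_wait_conn() and isert_release_work() block on two completions that the teardown paths elsewhere in this file are assumed to signal: conn_wait_comp_err once the QP has flushed its outstanding work requests, and conn_wait once the disconnect has fully completed. A sketch of that signalling side, under those assumptions (the helper name is hypothetical):

#include <linux/completion.h>

/* Hypothetical helper marking teardown progress from the error paths. */
static void example_signal_teardown(struct isert_conn *isert_conn)
{
	/* Unblocks isert_wait_conn() once the QP is flushed... */
	complete(&isert_conn->conn_wait_comp_err);
	/* ...and isert_release_work() once disconnect is done. */
	complete(&isert_conn->conn_wait);
}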
3442
3443static void isert_free_conn(struct iscsi_conn *conn)
3444{
3445 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
3446
3447 isert_put_conn(isert_conn);
3448}
3449
3450static struct iscsit_transport iser_target_transport = {
3451 .name = "IB/iSER",
3452 .transport_type = ISCSI_INFINIBAND,
d703ce2f 3453 .priv_size = sizeof(struct isert_cmd),
b8d26b3b
NB
3454 .owner = THIS_MODULE,
3455 .iscsit_setup_np = isert_setup_np,
3456 .iscsit_accept_np = isert_accept_np,
3457 .iscsit_free_np = isert_free_np,
defd8848 3458 .iscsit_wait_conn = isert_wait_conn,
b8d26b3b 3459 .iscsit_free_conn = isert_free_conn,
b8d26b3b
NB
3460 .iscsit_get_login_rx = isert_get_login_rx,
3461 .iscsit_put_login_tx = isert_put_login_tx,
3462 .iscsit_immediate_queue = isert_immediate_queue,
3463 .iscsit_response_queue = isert_response_queue,
3464 .iscsit_get_dataout = isert_get_dataout,
3465 .iscsit_queue_data_in = isert_put_datain,
3466 .iscsit_queue_status = isert_put_response,
131e6abc 3467 .iscsit_aborted_task = isert_aborted_task,
e70beee7 3468 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
b8d26b3b
NB
3469};
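
The .priv_size field above tells the iSCSI target core to over-allocate each struct iscsi_cmd by sizeof(struct isert_cmd), so the transport never allocates per-command state separately. The private area sits directly behind the iscsi_cmd and is recovered with the iscsit_priv_cmd() helper from iscsi_transport.h, roughly as sketched:

/* Sketch: recovering the transport-private area behind an iscsi_cmd.
 * The core allocated sizeof(*cmd) + priv_size; private data follows. */
static struct isert_cmd *example_cmd_priv(struct iscsi_cmd *cmd)
{
	return iscsit_priv_cmd(cmd);
}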
3470
3471static int __init isert_init(void)
3472{
3473 int ret;
3474
3475 isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
3476 if (!isert_rx_wq) {
3477 pr_err("Unable to allocate isert_rx_wq\n");
3478 return -ENOMEM;
3479 }
3480
3481 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
3482 if (!isert_comp_wq) {
3483 pr_err("Unable to allocate isert_comp_wq\n");
3484 ret = -ENOMEM;
3485 goto destroy_rx_wq;
3486 }
3487
b02efbfc
SG
3488 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3489 WQ_UNBOUND_MAX_ACTIVE);
3490 if (!isert_release_wq) {
3491 pr_err("Unable to allocate isert_release_wq\n");
3492 ret = -ENOMEM;
3493 goto destroy_comp_wq;
3494 }
3495
b8d26b3b 3496 iscsit_register_transport(&iser_target_transport);
b02efbfc
SG
3497 pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3498
b8d26b3b
NB
3499 return 0;
3500
b02efbfc
SG
3501destroy_comp_wq:
3502 destroy_workqueue(isert_comp_wq);
b8d26b3b
NB
3503destroy_rx_wq:
3504 destroy_workqueue(isert_rx_wq);
3505 return ret;
3506}
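
isert_release_wq is allocated WQ_UNBOUND because isert_release_work() may sleep for the whole of connection teardown; unbound kworkers are not pinned to the submitting CPU, so a long sleep does not back up per-CPU work. A self-contained sketch of the same pattern (all names hypothetical):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* May sleep for a long time without stalling per-CPU kworkers. */
}
static DECLARE_WORK(example_work, example_fn);

static int example_setup(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND,
				     WQ_UNBOUND_MAX_ACTIVE);
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_teardown(void)
{
	destroy_workqueue(example_wq);	/* drains queued work first */
}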
3507
3508static void __exit isert_exit(void)
3509{
f5ebec96 3510 flush_scheduled_work();
b02efbfc 3511 destroy_workqueue(isert_release_wq);
b8d26b3b
NB
3512 destroy_workqueue(isert_comp_wq);
3513 destroy_workqueue(isert_rx_wq);
3514 iscsit_unregister_transport(&iser_target_transport);
3515 pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
3516}
3517
3518MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3519MODULE_VERSION("0.1");
3520MODULE_AUTHOR("nab@Linux-iSCSI.org");
3521MODULE_LICENSE("GPL");
3522
3523module_init(isert_init);
3524module_exit(isert_exit);