]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/infiniband/ulp/isert/ib_isert.c
iser-target: Get rid of redundant max_accept
[mirror_ubuntu-zesty-kernel.git] / drivers / infiniband / ulp / isert / ib_isert.c
CommitLineData
b8d26b3b
NB
1/*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
3 *
4c76251e 4 * (c) Copyright 2013 Datera, Inc.
b8d26b3b
NB
5 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
18
19#include <linux/string.h>
20#include <linux/module.h>
21#include <linux/scatterlist.h>
22#include <linux/socket.h>
23#include <linux/in.h>
24#include <linux/in6.h>
25#include <rdma/ib_verbs.h>
26#include <rdma/rdma_cm.h>
27#include <target/target_core_base.h>
28#include <target/target_core_fabric.h>
29#include <target/iscsi/iscsi_transport.h>
531b7bf4 30#include <linux/semaphore.h>
b8d26b3b
NB
31
32#include "isert_proto.h"
33#include "ib_isert.h"
34
35#define ISERT_MAX_CONN 8
36#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
bdf20e72
SG
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN)
b8d26b3b 40
45678b6b 41static int isert_debug_level;
24f412dd
SG
42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
44
b8d26b3b
NB
45static DEFINE_MUTEX(device_list_mutex);
46static LIST_HEAD(device_list);
b8d26b3b 47static struct workqueue_struct *isert_comp_wq;
b02efbfc 48static struct workqueue_struct *isert_release_wq;
b8d26b3b 49
d40945d8
VP
50static void
51isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
52static int
53isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
54 struct isert_rdma_wr *wr);
59464ef4 55static void
a3a5a826 56isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
59464ef4 57static int
a3a5a826
SG
58isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
59 struct isert_rdma_wr *wr);
f93f3a70
SG
60static int
61isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
2371e5da
SG
62static int
63isert_rdma_post_recvl(struct isert_conn *isert_conn);
64static int
65isert_rdma_accept(struct isert_conn *isert_conn);
ca6c1d82 66struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
d40945d8 67
302cc7c3
SG
68static inline bool
69isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
70{
23a548ee 71 return (conn->pi_support &&
302cc7c3
SG
72 cmd->prot_op != TARGET_PROT_NORMAL);
73}
74
75
b8d26b3b
NB
76static void
77isert_qp_event_callback(struct ib_event *e, void *context)
78{
6700425e 79 struct isert_conn *isert_conn = context;
b8d26b3b 80
4c22e07f 81 isert_err("conn %p event: %d\n", isert_conn, e->event);
b8d26b3b
NB
82 switch (e->event) {
83 case IB_EVENT_COMM_EST:
84 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
85 break;
86 case IB_EVENT_QP_LAST_WQE_REACHED:
4c22e07f 87 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
b8d26b3b
NB
88 break;
89 default:
90 break;
91 }
92}
93
94static int
95isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
96{
97 int ret;
98
99 ret = ib_query_device(ib_dev, devattr);
100 if (ret) {
24f412dd 101 isert_err("ib_query_device() failed: %d\n", ret);
b8d26b3b
NB
102 return ret;
103 }
24f412dd
SG
104 isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
105 isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
b8d26b3b
NB
106
107 return 0;
108}
109
40fc069a
SG
110static struct isert_comp *
111isert_comp_get(struct isert_conn *isert_conn)
b8d26b3b
NB
112{
113 struct isert_device *device = isert_conn->conn_device;
4a295bae 114 struct isert_comp *comp;
40fc069a 115 int i, min = 0;
b8d26b3b 116
b8d26b3b 117 mutex_lock(&device_list_mutex);
4a295bae
SG
118 for (i = 0; i < device->comps_used; i++)
119 if (device->comps[i].active_qps <
120 device->comps[min].active_qps)
121 min = i;
122 comp = &device->comps[min];
123 comp->active_qps++;
40fc069a
SG
124 mutex_unlock(&device_list_mutex);
125
24f412dd 126 isert_info("conn %p, using comp %p min_index: %d\n",
4a295bae 127 isert_conn, comp, min);
40fc069a
SG
128
129 return comp;
130}
131
132static void
133isert_comp_put(struct isert_comp *comp)
134{
135 mutex_lock(&device_list_mutex);
136 comp->active_qps--;
b8d26b3b 137 mutex_unlock(&device_list_mutex);
40fc069a
SG
138}
139
140static struct ib_qp *
141isert_create_qp(struct isert_conn *isert_conn,
142 struct isert_comp *comp,
143 struct rdma_cm_id *cma_id)
144{
145 struct isert_device *device = isert_conn->conn_device;
146 struct ib_qp_init_attr attr;
147 int ret;
b8d26b3b
NB
148
149 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
150 attr.event_handler = isert_qp_event_callback;
151 attr.qp_context = isert_conn;
6f0fae3d
SG
152 attr.send_cq = comp->cq;
153 attr.recv_cq = comp->cq;
b8d26b3b 154 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
bdf20e72 155 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
b8d26b3b
NB
156 /*
157 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
f57915cf
OG
158 * work-around for RDMA_READs with ConnectX-2.
159 *
160 * Also, still make sure to have at least two SGEs for
161 * outgoing control PDU responses.
b8d26b3b 162 */
f57915cf 163 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
b8d26b3b
NB
164 isert_conn->max_sge = attr.cap.max_send_sge;
165
166 attr.cap.max_recv_sge = 1;
167 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
168 attr.qp_type = IB_QPT_RC;
570db170 169 if (device->pi_capable)
d3e125da 170 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
b8d26b3b 171
67cb3949 172 ret = rdma_create_qp(cma_id, device->pd, &attr);
b8d26b3b 173 if (ret) {
24f412dd 174 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
40fc069a
SG
175 return ERR_PTR(ret);
176 }
177
178 return cma_id->qp;
179}
180
181static int
182isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
183{
184 struct isert_comp *comp;
185 int ret;
186
187 comp = isert_comp_get(isert_conn);
188 isert_conn->conn_qp = isert_create_qp(isert_conn, comp, cma_id);
189 if (IS_ERR(isert_conn->conn_qp)) {
190 ret = PTR_ERR(isert_conn->conn_qp);
19e2090f 191 goto err;
b8d26b3b 192 }
b8d26b3b
NB
193
194 return 0;
19e2090f 195err:
40fc069a 196 isert_comp_put(comp);
19e2090f 197 return ret;
b8d26b3b
NB
198}
199
200static void
201isert_cq_event_callback(struct ib_event *e, void *context)
202{
4c22e07f 203 isert_dbg("event: %d\n", e->event);
b8d26b3b
NB
204}
205
206static int
207isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
208{
67cb3949
SG
209 struct isert_device *device = isert_conn->conn_device;
210 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
211 struct iser_rx_desc *rx_desc;
212 struct ib_sge *rx_sg;
213 u64 dma_addr;
214 int i, j;
215
216 isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
217 sizeof(struct iser_rx_desc), GFP_KERNEL);
218 if (!isert_conn->conn_rx_descs)
219 goto fail;
220
221 rx_desc = isert_conn->conn_rx_descs;
222
223 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
224 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
225 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
226 if (ib_dma_mapping_error(ib_dev, dma_addr))
227 goto dma_map_fail;
228
229 rx_desc->dma_addr = dma_addr;
230
231 rx_sg = &rx_desc->rx_sg;
232 rx_sg->addr = rx_desc->dma_addr;
233 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
67cb3949 234 rx_sg->lkey = device->mr->lkey;
b8d26b3b
NB
235 }
236
237 isert_conn->conn_rx_desc_head = 0;
4c22e07f 238
b8d26b3b
NB
239 return 0;
240
241dma_map_fail:
242 rx_desc = isert_conn->conn_rx_descs;
243 for (j = 0; j < i; j++, rx_desc++) {
244 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
245 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
246 }
247 kfree(isert_conn->conn_rx_descs);
248 isert_conn->conn_rx_descs = NULL;
249fail:
4c22e07f
SG
250 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
251
b8d26b3b
NB
252 return -ENOMEM;
253}
254
255static void
256isert_free_rx_descriptors(struct isert_conn *isert_conn)
257{
4a579da2 258 struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
b8d26b3b
NB
259 struct iser_rx_desc *rx_desc;
260 int i;
261
262 if (!isert_conn->conn_rx_descs)
263 return;
264
265 rx_desc = isert_conn->conn_rx_descs;
266 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
267 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
268 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
269 }
270
271 kfree(isert_conn->conn_rx_descs);
272 isert_conn->conn_rx_descs = NULL;
273}
274
6f0fae3d
SG
275static void isert_cq_work(struct work_struct *);
276static void isert_cq_callback(struct ib_cq *, void *);
b8d26b3b 277
172369c5
SG
278static void
279isert_free_comps(struct isert_device *device)
b8d26b3b 280{
172369c5 281 int i;
59464ef4 282
172369c5
SG
283 for (i = 0; i < device->comps_used; i++) {
284 struct isert_comp *comp = &device->comps[i];
b1a5ad00 285
172369c5
SG
286 if (comp->cq) {
287 cancel_work_sync(&comp->work);
288 ib_destroy_cq(comp->cq);
289 }
59464ef4 290 }
172369c5
SG
291 kfree(device->comps);
292}
d40945d8 293
172369c5
SG
294static int
295isert_alloc_comps(struct isert_device *device,
296 struct ib_device_attr *attr)
297{
298 int i, max_cqe, ret = 0;
d3e125da 299
4a295bae 300 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
172369c5
SG
301 device->ib_device->num_comp_vectors));
302
24f412dd 303 isert_info("Using %d CQs, %s supports %d vectors support "
4a295bae
SG
304 "Fast registration %d pi_capable %d\n",
305 device->comps_used, device->ib_device->name,
306 device->ib_device->num_comp_vectors, device->use_fastreg,
307 device->pi_capable);
308
309 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
310 GFP_KERNEL);
311 if (!device->comps) {
24f412dd 312 isert_err("Unable to allocate completion contexts\n");
b8d26b3b
NB
313 return -ENOMEM;
314 }
4a295bae 315
172369c5
SG
316 max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
317
4a295bae
SG
318 for (i = 0; i < device->comps_used; i++) {
319 struct isert_comp *comp = &device->comps[i];
320
321 comp->device = device;
6f0fae3d
SG
322 INIT_WORK(&comp->work, isert_cq_work);
323 comp->cq = ib_create_cq(device->ib_device,
324 isert_cq_callback,
325 isert_cq_event_callback,
326 (void *)comp,
327 max_cqe, i);
328 if (IS_ERR(comp->cq)) {
172369c5 329 isert_err("Unable to allocate cq\n");
6f0fae3d
SG
330 ret = PTR_ERR(comp->cq);
331 comp->cq = NULL;
b8d26b3b 332 goto out_cq;
94a71110 333 }
b8d26b3b 334
6f0fae3d 335 ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
94a71110 336 if (ret)
b8d26b3b
NB
337 goto out_cq;
338 }
339
172369c5
SG
340 return 0;
341out_cq:
342 isert_free_comps(device);
343 return ret;
344}
345
346static int
347isert_create_device_ib_res(struct isert_device *device)
348{
172369c5 349 struct ib_device_attr *dev_attr;
fd8205e8 350 int ret;
172369c5
SG
351
352 dev_attr = &device->dev_attr;
fd8205e8 353 ret = isert_query_device(device->ib_device, dev_attr);
172369c5
SG
354 if (ret)
355 return ret;
356
357 /* asign function handlers */
358 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
359 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
360 device->use_fastreg = 1;
361 device->reg_rdma_mem = isert_reg_rdma;
362 device->unreg_rdma_mem = isert_unreg_rdma;
363 } else {
364 device->use_fastreg = 0;
365 device->reg_rdma_mem = isert_map_rdma;
366 device->unreg_rdma_mem = isert_unmap_cmd;
367 }
368
369 ret = isert_alloc_comps(device, dev_attr);
370 if (ret)
371 return ret;
372
67cb3949
SG
373 device->pd = ib_alloc_pd(device->ib_device);
374 if (IS_ERR(device->pd)) {
375 ret = PTR_ERR(device->pd);
376 isert_err("failed to allocate pd, device %p, ret=%d\n",
377 device, ret);
378 goto out_cq;
379 }
380
381 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
382 if (IS_ERR(device->mr)) {
383 ret = PTR_ERR(device->mr);
384 isert_err("failed to create dma mr, device %p, ret=%d\n",
385 device, ret);
386 goto out_mr;
387 }
388
172369c5
SG
389 /* Check signature cap */
390 device->pi_capable = dev_attr->device_cap_flags &
391 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
67cb3949 392
b8d26b3b
NB
393 return 0;
394
67cb3949
SG
395out_mr:
396 ib_dealloc_pd(device->pd);
b8d26b3b 397out_cq:
172369c5 398 isert_free_comps(device);
b8d26b3b
NB
399 return ret;
400}
401
402static void
403isert_free_device_ib_res(struct isert_device *device)
404{
24f412dd 405 isert_info("device %p\n", device);
b8d26b3b 406
67cb3949
SG
407 ib_dereg_mr(device->mr);
408 ib_dealloc_pd(device->pd);
172369c5 409 isert_free_comps(device);
b8d26b3b
NB
410}
411
412static void
cf8ae958 413isert_device_put(struct isert_device *device)
b8d26b3b
NB
414{
415 mutex_lock(&device_list_mutex);
416 device->refcount--;
4c22e07f 417 isert_info("device %p refcount %d\n", device, device->refcount);
b8d26b3b
NB
418 if (!device->refcount) {
419 isert_free_device_ib_res(device);
420 list_del(&device->dev_node);
421 kfree(device);
422 }
423 mutex_unlock(&device_list_mutex);
424}
425
426static struct isert_device *
cf8ae958 427isert_device_get(struct rdma_cm_id *cma_id)
b8d26b3b
NB
428{
429 struct isert_device *device;
430 int ret;
431
432 mutex_lock(&device_list_mutex);
433 list_for_each_entry(device, &device_list, dev_node) {
434 if (device->ib_device->node_guid == cma_id->device->node_guid) {
435 device->refcount++;
4c22e07f
SG
436 isert_info("Found iser device %p refcount %d\n",
437 device, device->refcount);
b8d26b3b
NB
438 mutex_unlock(&device_list_mutex);
439 return device;
440 }
441 }
442
443 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
444 if (!device) {
445 mutex_unlock(&device_list_mutex);
446 return ERR_PTR(-ENOMEM);
447 }
448
449 INIT_LIST_HEAD(&device->dev_node);
450
451 device->ib_device = cma_id->device;
452 ret = isert_create_device_ib_res(device);
453 if (ret) {
454 kfree(device);
455 mutex_unlock(&device_list_mutex);
456 return ERR_PTR(ret);
457 }
458
459 device->refcount++;
460 list_add_tail(&device->dev_node, &device_list);
4c22e07f
SG
461 isert_info("Created a new iser device %p refcount %d\n",
462 device, device->refcount);
b8d26b3b
NB
463 mutex_unlock(&device_list_mutex);
464
465 return device;
466}
467
59464ef4 468static void
a3a5a826 469isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
59464ef4
VP
470{
471 struct fast_reg_descriptor *fr_desc, *tmp;
472 int i = 0;
473
a3a5a826 474 if (list_empty(&isert_conn->conn_fr_pool))
59464ef4
VP
475 return;
476
4c22e07f 477 isert_info("Freeing conn %p fastreg pool", isert_conn);
59464ef4
VP
478
479 list_for_each_entry_safe(fr_desc, tmp,
a3a5a826 480 &isert_conn->conn_fr_pool, list) {
59464ef4
VP
481 list_del(&fr_desc->list);
482 ib_free_fast_reg_page_list(fr_desc->data_frpl);
483 ib_dereg_mr(fr_desc->data_mr);
d3e125da
SG
484 if (fr_desc->pi_ctx) {
485 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
486 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
487 ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
488 kfree(fr_desc->pi_ctx);
489 }
59464ef4
VP
490 kfree(fr_desc);
491 ++i;
492 }
493
a3a5a826 494 if (i < isert_conn->conn_fr_pool_size)
24f412dd 495 isert_warn("Pool still has %d regions registered\n",
a3a5a826 496 isert_conn->conn_fr_pool_size - i);
59464ef4
VP
497}
498
570db170
SG
499static int
500isert_create_pi_ctx(struct fast_reg_descriptor *desc,
501 struct ib_device *device,
502 struct ib_pd *pd)
503{
504 struct ib_mr_init_attr mr_init_attr;
505 struct pi_context *pi_ctx;
506 int ret;
507
508 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
509 if (!pi_ctx) {
24f412dd 510 isert_err("Failed to allocate pi context\n");
570db170
SG
511 return -ENOMEM;
512 }
513
514 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
515 ISCSI_ISER_SG_TABLESIZE);
516 if (IS_ERR(pi_ctx->prot_frpl)) {
24f412dd 517 isert_err("Failed to allocate prot frpl err=%ld\n",
570db170
SG
518 PTR_ERR(pi_ctx->prot_frpl));
519 ret = PTR_ERR(pi_ctx->prot_frpl);
520 goto err_pi_ctx;
521 }
522
523 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
524 if (IS_ERR(pi_ctx->prot_mr)) {
24f412dd 525 isert_err("Failed to allocate prot frmr err=%ld\n",
570db170
SG
526 PTR_ERR(pi_ctx->prot_mr));
527 ret = PTR_ERR(pi_ctx->prot_mr);
528 goto err_prot_frpl;
529 }
530 desc->ind |= ISERT_PROT_KEY_VALID;
531
532 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
533 mr_init_attr.max_reg_descriptors = 2;
534 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
535 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
536 if (IS_ERR(pi_ctx->sig_mr)) {
24f412dd 537 isert_err("Failed to allocate signature enabled mr err=%ld\n",
570db170
SG
538 PTR_ERR(pi_ctx->sig_mr));
539 ret = PTR_ERR(pi_ctx->sig_mr);
540 goto err_prot_mr;
541 }
542
543 desc->pi_ctx = pi_ctx;
544 desc->ind |= ISERT_SIG_KEY_VALID;
545 desc->ind &= ~ISERT_PROTECTED;
546
547 return 0;
548
549err_prot_mr:
550 ib_dereg_mr(desc->pi_ctx->prot_mr);
551err_prot_frpl:
552 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
553err_pi_ctx:
554 kfree(desc->pi_ctx);
555
556 return ret;
557}
558
dc87a90f
SG
559static int
560isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
570db170 561 struct fast_reg_descriptor *fr_desc)
dc87a90f 562{
d3e125da
SG
563 int ret;
564
dc87a90f
SG
565 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
566 ISCSI_ISER_SG_TABLESIZE);
567 if (IS_ERR(fr_desc->data_frpl)) {
24f412dd 568 isert_err("Failed to allocate data frpl err=%ld\n",
4c22e07f 569 PTR_ERR(fr_desc->data_frpl));
dc87a90f
SG
570 return PTR_ERR(fr_desc->data_frpl);
571 }
572
573 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
574 if (IS_ERR(fr_desc->data_mr)) {
24f412dd 575 isert_err("Failed to allocate data frmr err=%ld\n",
4c22e07f 576 PTR_ERR(fr_desc->data_mr));
d3e125da
SG
577 ret = PTR_ERR(fr_desc->data_mr);
578 goto err_data_frpl;
dc87a90f 579 }
d3e125da
SG
580 fr_desc->ind |= ISERT_DATA_KEY_VALID;
581
24f412dd 582 isert_dbg("Created fr_desc %p\n", fr_desc);
dc87a90f
SG
583
584 return 0;
570db170 585
d3e125da
SG
586err_data_frpl:
587 ib_free_fast_reg_page_list(fr_desc->data_frpl);
588
589 return ret;
59464ef4
VP
590}
591
592static int
570db170 593isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
59464ef4
VP
594{
595 struct fast_reg_descriptor *fr_desc;
596 struct isert_device *device = isert_conn->conn_device;
f46d6a8a
NB
597 struct se_session *se_sess = isert_conn->conn->sess->se_sess;
598 struct se_node_acl *se_nacl = se_sess->se_node_acl;
599 int i, ret, tag_num;
600 /*
601 * Setup the number of FRMRs based upon the number of tags
602 * available to session in iscsi_target_locate_portal().
603 */
604 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
605 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
59464ef4 606
a3a5a826 607 isert_conn->conn_fr_pool_size = 0;
f46d6a8a 608 for (i = 0; i < tag_num; i++) {
59464ef4
VP
609 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
610 if (!fr_desc) {
24f412dd 611 isert_err("Failed to allocate fast_reg descriptor\n");
59464ef4
VP
612 ret = -ENOMEM;
613 goto err;
614 }
615
dc87a90f 616 ret = isert_create_fr_desc(device->ib_device,
67cb3949 617 device->pd, fr_desc);
dc87a90f 618 if (ret) {
24f412dd 619 isert_err("Failed to create fastreg descriptor err=%d\n",
dc87a90f 620 ret);
a80e21b3 621 kfree(fr_desc);
59464ef4
VP
622 goto err;
623 }
59464ef4 624
a3a5a826
SG
625 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
626 isert_conn->conn_fr_pool_size++;
59464ef4
VP
627 }
628
24f412dd 629 isert_dbg("Creating conn %p fastreg pool size=%d",
a3a5a826 630 isert_conn, isert_conn->conn_fr_pool_size);
59464ef4
VP
631
632 return 0;
633
634err:
a3a5a826 635 isert_conn_free_fastreg_pool(isert_conn);
59464ef4
VP
636 return ret;
637}
638
ae9ea9ed
SG
639static void
640isert_init_conn(struct isert_conn *isert_conn)
b8d26b3b 641{
b8d26b3b
NB
642 isert_conn->state = ISER_CONN_INIT;
643 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
644 init_completion(&isert_conn->conn_login_comp);
2371e5da 645 init_completion(&isert_conn->login_req_comp);
defd8848 646 init_completion(&isert_conn->conn_wait);
b8d26b3b 647 kref_init(&isert_conn->conn_kref);
b2cb9649 648 mutex_init(&isert_conn->conn_mutex);
59464ef4 649 spin_lock_init(&isert_conn->conn_lock);
f46d6a8a 650 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
ae9ea9ed 651}
b8d26b3b 652
ae9ea9ed
SG
653static void
654isert_free_login_buf(struct isert_conn *isert_conn)
655{
656 struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
657
658 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
659 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
660 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
661 ISCSI_DEF_MAX_RECV_SEG_LEN,
662 DMA_FROM_DEVICE);
663 kfree(isert_conn->login_buf);
664}
665
666static int
667isert_alloc_login_buf(struct isert_conn *isert_conn,
668 struct ib_device *ib_dev)
669{
670 int ret;
b8d26b3b
NB
671
672 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
673 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
674 if (!isert_conn->login_buf) {
24f412dd 675 isert_err("Unable to allocate isert_conn->login_buf\n");
ae9ea9ed 676 return -ENOMEM;
b8d26b3b
NB
677 }
678
679 isert_conn->login_req_buf = isert_conn->login_buf;
680 isert_conn->login_rsp_buf = isert_conn->login_buf +
681 ISCSI_DEF_MAX_RECV_SEG_LEN;
ae9ea9ed 682
24f412dd 683 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
b8d26b3b
NB
684 isert_conn->login_buf, isert_conn->login_req_buf,
685 isert_conn->login_rsp_buf);
686
687 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
688 (void *)isert_conn->login_req_buf,
689 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
690
691 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
692 if (ret) {
ae9ea9ed 693 isert_err("login_req_dma mapping error: %d\n", ret);
b8d26b3b
NB
694 isert_conn->login_req_dma = 0;
695 goto out_login_buf;
696 }
697
698 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
699 (void *)isert_conn->login_rsp_buf,
700 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
701
702 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
703 if (ret) {
ae9ea9ed 704 isert_err("login_rsp_dma mapping error: %d\n", ret);
b8d26b3b
NB
705 isert_conn->login_rsp_dma = 0;
706 goto out_req_dma_map;
707 }
708
ae9ea9ed
SG
709 return 0;
710
711out_req_dma_map:
712 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
713 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
714out_login_buf:
715 kfree(isert_conn->login_buf);
716 return ret;
717}
718
719static int
720isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
721{
722 struct isert_np *isert_np = cma_id->context;
723 struct iscsi_np *np = isert_np->np;
724 struct isert_conn *isert_conn;
725 struct isert_device *device;
726 int ret = 0;
727
728 spin_lock_bh(&np->np_thread_lock);
729 if (!np->enabled) {
730 spin_unlock_bh(&np->np_thread_lock);
731 isert_dbg("iscsi_np is not enabled, reject connect request\n");
732 return rdma_reject(cma_id, NULL, 0);
733 }
734 spin_unlock_bh(&np->np_thread_lock);
735
736 isert_dbg("cma_id: %p, portal: %p\n",
737 cma_id, cma_id->context);
738
739 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
740 if (!isert_conn)
741 return -ENOMEM;
742
743 isert_init_conn(isert_conn);
744 isert_conn->conn_cm_id = cma_id;
745
746 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
747 if (ret)
748 goto out;
749
cf8ae958 750 device = isert_device_get(cma_id);
b8d26b3b
NB
751 if (IS_ERR(device)) {
752 ret = PTR_ERR(device);
753 goto out_rsp_dma_map;
754 }
cf8ae958 755 isert_conn->conn_device = device;
b8d26b3b 756
1a92e17e
SG
757 /* Set max inflight RDMA READ requests */
758 isert_conn->initiator_depth = min_t(u8,
759 event->param.conn.initiator_depth,
760 device->dev_attr.max_qp_init_rd_atom);
24f412dd 761 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
1a92e17e 762
570db170 763 ret = isert_conn_setup_qp(isert_conn, cma_id);
b8d26b3b
NB
764 if (ret)
765 goto out_conn_dev;
766
2371e5da
SG
767 ret = isert_rdma_post_recvl(isert_conn);
768 if (ret)
769 goto out_conn_dev;
770
771 ret = isert_rdma_accept(isert_conn);
772 if (ret)
773 goto out_conn_dev;
774
b8d26b3b 775 mutex_lock(&isert_np->np_accept_mutex);
9fe63c88 776 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
b8d26b3b
NB
777 mutex_unlock(&isert_np->np_accept_mutex);
778
24f412dd 779 isert_info("np %p: Allow accept_np to continue\n", np);
531b7bf4 780 up(&isert_np->np_sem);
b8d26b3b
NB
781 return 0;
782
783out_conn_dev:
cf8ae958 784 isert_device_put(device);
b8d26b3b 785out_rsp_dma_map:
ae9ea9ed 786 isert_free_login_buf(isert_conn);
b8d26b3b
NB
787out:
788 kfree(isert_conn);
2371e5da 789 rdma_reject(cma_id, NULL, 0);
b8d26b3b
NB
790 return ret;
791}
792
793static void
794isert_connect_release(struct isert_conn *isert_conn)
795{
b8d26b3b 796 struct isert_device *device = isert_conn->conn_device;
b8d26b3b 797
4c22e07f 798 isert_dbg("conn %p\n", isert_conn);
b8d26b3b 799
a3a5a826
SG
800 if (device && device->use_fastreg)
801 isert_conn_free_fastreg_pool(isert_conn);
59464ef4 802
19e2090f 803 isert_free_rx_descriptors(isert_conn);
4a579da2
SG
804 if (isert_conn->conn_cm_id)
805 rdma_destroy_id(isert_conn->conn_cm_id);
19e2090f 806
b8d26b3b 807 if (isert_conn->conn_qp) {
4a295bae
SG
808 struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
809
40fc069a 810 isert_comp_put(comp);
19e2090f 811 ib_destroy_qp(isert_conn->conn_qp);
b8d26b3b
NB
812 }
813
ae9ea9ed
SG
814 if (isert_conn->login_buf)
815 isert_free_login_buf(isert_conn);
816
b8d26b3b
NB
817 kfree(isert_conn);
818
819 if (device)
cf8ae958 820 isert_device_put(device);
b8d26b3b
NB
821}
822
823static void
824isert_connected_handler(struct rdma_cm_id *cma_id)
825{
19e2090f 826 struct isert_conn *isert_conn = cma_id->qp->qp_context;
c2f88b17 827
24f412dd 828 isert_info("conn %p\n", isert_conn);
128e9cc8 829
2371e5da 830 if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
24f412dd 831 isert_warn("conn %p connect_release is running\n", isert_conn);
2371e5da
SG
832 return;
833 }
834
835 mutex_lock(&isert_conn->conn_mutex);
836 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
837 isert_conn->state = ISER_CONN_UP;
838 mutex_unlock(&isert_conn->conn_mutex);
b8d26b3b
NB
839}
840
841static void
842isert_release_conn_kref(struct kref *kref)
843{
844 struct isert_conn *isert_conn = container_of(kref,
845 struct isert_conn, conn_kref);
846
4c22e07f
SG
847 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
848 current->pid);
b8d26b3b
NB
849
850 isert_connect_release(isert_conn);
851}
852
853static void
854isert_put_conn(struct isert_conn *isert_conn)
855{
856 kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
857}
858
954f2372
SG
859/**
860 * isert_conn_terminate() - Initiate connection termination
861 * @isert_conn: isert connection struct
862 *
863 * Notes:
128e9cc8 864 * In case the connection state is FULL_FEATURE, move state
954f2372 865 * to TEMINATING and start teardown sequence (rdma_disconnect).
128e9cc8 866 * In case the connection state is UP, complete flush as well.
954f2372
SG
867 *
868 * This routine must be called with conn_mutex held. Thus it is
869 * safe to call multiple times.
870 */
871static void
872isert_conn_terminate(struct isert_conn *isert_conn)
873{
874 int err;
875
128e9cc8
SG
876 switch (isert_conn->state) {
877 case ISER_CONN_TERMINATING:
878 break;
879 case ISER_CONN_UP:
128e9cc8 880 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
24f412dd 881 isert_info("Terminating conn %p state %d\n",
954f2372 882 isert_conn, isert_conn->state);
128e9cc8 883 isert_conn->state = ISER_CONN_TERMINATING;
954f2372
SG
884 err = rdma_disconnect(isert_conn->conn_cm_id);
885 if (err)
24f412dd 886 isert_warn("Failed rdma_disconnect isert_conn %p\n",
954f2372 887 isert_conn);
128e9cc8
SG
888 break;
889 default:
24f412dd 890 isert_warn("conn %p teminating in state %d\n",
128e9cc8 891 isert_conn, isert_conn->state);
954f2372
SG
892 }
893}
894
3b726ae2 895static int
ca6c1d82
SG
896isert_np_cma_handler(struct isert_np *isert_np,
897 enum rdma_cm_event_type event)
b8d26b3b 898{
24f412dd 899 isert_dbg("isert np %p, handling event %d\n", isert_np, event);
3b726ae2 900
ca6c1d82
SG
901 switch (event) {
902 case RDMA_CM_EVENT_DEVICE_REMOVAL:
3b726ae2 903 isert_np->np_cm_id = NULL;
ca6c1d82
SG
904 break;
905 case RDMA_CM_EVENT_ADDR_CHANGE:
906 isert_np->np_cm_id = isert_setup_id(isert_np);
907 if (IS_ERR(isert_np->np_cm_id)) {
24f412dd
SG
908 isert_err("isert np %p setup id failed: %ld\n",
909 isert_np, PTR_ERR(isert_np->np_cm_id));
ca6c1d82
SG
910 isert_np->np_cm_id = NULL;
911 }
912 break;
913 default:
24f412dd 914 isert_err("isert np %p Unexpected event %d\n",
ca6c1d82 915 isert_np, event);
3b726ae2
SG
916 }
917
ca6c1d82
SG
918 return -1;
919}
920
921static int
922isert_disconnected_handler(struct rdma_cm_id *cma_id,
923 enum rdma_cm_event_type event)
924{
925 struct isert_np *isert_np = cma_id->context;
926 struct isert_conn *isert_conn;
927
928 if (isert_np->np_cm_id == cma_id)
929 return isert_np_cma_handler(cma_id->context, event);
930
19e2090f 931 isert_conn = cma_id->qp->qp_context;
b8d26b3b 932
128e9cc8
SG
933 mutex_lock(&isert_conn->conn_mutex);
934 isert_conn_terminate(isert_conn);
935 mutex_unlock(&isert_conn->conn_mutex);
936
24f412dd 937 isert_info("conn %p completing conn_wait\n", isert_conn);
128e9cc8 938 complete(&isert_conn->conn_wait);
3b726ae2
SG
939
940 return 0;
b8d26b3b
NB
941}
942
4a579da2 943static int
954f2372
SG
944isert_connect_error(struct rdma_cm_id *cma_id)
945{
19e2090f 946 struct isert_conn *isert_conn = cma_id->qp->qp_context;
954f2372 947
4a579da2 948 isert_conn->conn_cm_id = NULL;
954f2372 949 isert_put_conn(isert_conn);
4a579da2
SG
950
951 return -1;
954f2372
SG
952}
953
b8d26b3b
NB
954static int
955isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
956{
957 int ret = 0;
958
4c22e07f
SG
959 isert_info("event %d status %d id %p np %p\n", event->event,
960 event->status, cma_id, cma_id->context);
b8d26b3b
NB
961
962 switch (event->event) {
963 case RDMA_CM_EVENT_CONNECT_REQUEST:
b8d26b3b 964 ret = isert_connect_request(cma_id, event);
3b726ae2 965 if (ret)
4c22e07f 966 isert_err("failed handle connect request %d\n", ret);
b8d26b3b
NB
967 break;
968 case RDMA_CM_EVENT_ESTABLISHED:
b8d26b3b
NB
969 isert_connected_handler(cma_id);
970 break;
88c4015f
SG
971 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
972 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
973 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
88c4015f 974 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
ca6c1d82 975 ret = isert_disconnected_handler(cma_id, event->event);
b8d26b3b 976 break;
954f2372
SG
977 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
978 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
b8d26b3b 979 case RDMA_CM_EVENT_CONNECT_ERROR:
4a579da2 980 ret = isert_connect_error(cma_id);
954f2372 981 break;
b8d26b3b 982 default:
24f412dd 983 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
b8d26b3b
NB
984 break;
985 }
986
b8d26b3b
NB
987 return ret;
988}
989
/*
 * Post @count receive work requests starting at the connection's current
 * RX ring head.  The WRs are chained so a single ib_post_recv() suffices.
 * On success the ring head advances; on failure the posted-buffer count
 * is rolled back so accounting stays consistent.
 */
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		/* mask-wrap assumes ISERT_QP_MAX_RECV_DTOS is a power of two */
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	/* bump the count before posting; undo below if the post fails */
	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		isert_dbg("Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
1022
1023static int
1024isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
1025{
1026 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1027 struct ib_send_wr send_wr, *send_wr_failed;
1028 int ret;
1029
1030 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
1031 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1032
1033 send_wr.next = NULL;
b0a191e7 1034 send_wr.wr_id = (uintptr_t)tx_desc;
b8d26b3b
NB
1035 send_wr.sg_list = tx_desc->tx_sg;
1036 send_wr.num_sge = tx_desc->num_sge;
1037 send_wr.opcode = IB_WR_SEND;
1038 send_wr.send_flags = IB_SEND_SIGNALED;
1039
b8d26b3b 1040 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
bdf20e72 1041 if (ret)
24f412dd 1042 isert_err("ib_post_send() failed, ret: %d\n", ret);
b8d26b3b
NB
1043
1044 return ret;
1045}
1046
1047static void
1048isert_create_send_desc(struct isert_conn *isert_conn,
1049 struct isert_cmd *isert_cmd,
1050 struct iser_tx_desc *tx_desc)
1051{
67cb3949
SG
1052 struct isert_device *device = isert_conn->conn_device;
1053 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
1054
1055 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
1056 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1057
1058 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
1059 tx_desc->iser_header.flags = ISER_VER;
1060
1061 tx_desc->num_sge = 1;
1062 tx_desc->isert_cmd = isert_cmd;
1063
67cb3949
SG
1064 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
1065 tx_desc->tx_sg[0].lkey = device->mr->lkey;
24f412dd 1066 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
b8d26b3b
NB
1067 }
1068}
1069
1070static int
1071isert_init_tx_hdrs(struct isert_conn *isert_conn,
1072 struct iser_tx_desc *tx_desc)
1073{
67cb3949
SG
1074 struct isert_device *device = isert_conn->conn_device;
1075 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
1076 u64 dma_addr;
1077
1078 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
1079 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1080 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
24f412dd 1081 isert_err("ib_dma_mapping_error() failed\n");
b8d26b3b
NB
1082 return -ENOMEM;
1083 }
1084
1085 tx_desc->dma_addr = dma_addr;
1086 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1087 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
67cb3949 1088 tx_desc->tx_sg[0].lkey = device->mr->lkey;
b8d26b3b 1089
4c22e07f
SG
1090 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
1091 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
1092 tx_desc->tx_sg[0].lkey);
b8d26b3b
NB
1093
1094 return 0;
1095}
1096
1097static void
95b60f07 1098isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
68a86dee 1099 struct ib_send_wr *send_wr)
b8d26b3b 1100{
95b60f07
NB
1101 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1102
b8d26b3b 1103 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
b0a191e7 1104 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
b8d26b3b 1105 send_wr->opcode = IB_WR_SEND;
95b60f07 1106 send_wr->sg_list = &tx_desc->tx_sg[0];
b8d26b3b 1107 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
95b60f07 1108 send_wr->send_flags = IB_SEND_SIGNALED;
b8d26b3b
NB
1109}
1110
/*
 * Post the dedicated login receive buffer (separate from the normal RX
 * descriptor ring) so the next login request PDU can be received.
 */
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_device->mr->lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	/* wr_id carries the raw buffer pointer so completion can identify it */
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	/* bump the count first; undo if the post fails */
	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}
1140
/*
 * Transmit a login response PDU of @length payload bytes.
 *
 * When the login completes successfully this also performs the one-time
 * transition to full-feature phase: allocate the fastreg pool (normal
 * sessions on fastreg-capable devices only), set up the RX descriptor
 * ring, post the initial receive buffers and flip the connection state.
 * While login is still in progress, only the single login RX buffer is
 * reposted.  Returns 0 or a negative errno.
 */
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		/* copy the payload under CPU ownership, then give it back */
		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_device->mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			/* discovery sessions never register memory */
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		/* login still in progress: repost the single login RX buffer */
		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
1212
/*
 * Process a received login request PDU sitting in the login RX buffer.
 * On the first request of the login exchange, the leading PDU fields
 * seed the iscsi_login state; the payload is then copied out and either
 * the login thread is woken (first request) or further login work is
 * scheduled.
 */
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	/* clamp payload copy to the negotiation buffer size */
	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		/* wake the thread blocked in the initial login handshake */
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
1261
b8d26b3b 1262static struct iscsi_cmd
676687c6 1263*isert_allocate_cmd(struct iscsi_conn *conn)
b8d26b3b 1264{
6700425e 1265 struct isert_conn *isert_conn = conn->context;
b8d26b3b 1266 struct isert_cmd *isert_cmd;
d703ce2f 1267 struct iscsi_cmd *cmd;
b8d26b3b 1268
676687c6 1269 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
d703ce2f 1270 if (!cmd) {
24f412dd 1271 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
b8d26b3b
NB
1272 return NULL;
1273 }
d703ce2f 1274 isert_cmd = iscsit_priv_cmd(cmd);
b8d26b3b 1275 isert_cmd->conn = isert_conn;
d703ce2f 1276 isert_cmd->iscsi_cmd = cmd;
b8d26b3b 1277
d703ce2f 1278 return cmd;
b8d26b3b
NB
1279}
1280
/*
 * Handle a received SCSI command PDU: set it up with the iSCSI core,
 * copy any immediate data that arrived inline in the RX descriptor into
 * the se_cmd scatterlist, then sequence the command.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	/* snapshot before iscsit_process_scsi_cmd() can change cmd state */
	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		/* rc > 0: the payload must be discarded, not queued */
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		  sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		/* dropped immediate data: release the extra se_cmd reference */
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
1338
/*
 * Handle an unsolicited DataOut PDU whose payload arrived inline in the
 * RX descriptor, copying it into the command's scatterlist at the
 * current write offset.  Solicited or non-page-aligned payloads are
 * rejected (see the FIXMEs below).
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	/* locate the SG entry that corresponds to the current write offset */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
1394
778de368
NB
1395static int
1396isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
d703ce2f
NB
1397 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1398 unsigned char *buf)
778de368 1399{
778de368
NB
1400 struct iscsi_conn *conn = isert_conn->conn;
1401 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1402 int rc;
1403
1404 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1405 if (rc < 0)
1406 return rc;
1407 /*
1408 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1409 */
1410
1411 return iscsit_process_nop_out(conn, cmd, hdr);
1412}
1413
adb54c29
NB
1414static int
1415isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
d703ce2f
NB
1416 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1417 struct iscsi_text *hdr)
adb54c29 1418{
adb54c29
NB
1419 struct iscsi_conn *conn = isert_conn->conn;
1420 u32 payload_length = ntoh24(hdr->dlength);
1421 int rc;
b44a2b67 1422 unsigned char *text_in = NULL;
adb54c29
NB
1423
1424 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1425 if (rc < 0)
1426 return rc;
1427
b44a2b67
SG
1428 if (payload_length) {
1429 text_in = kzalloc(payload_length, GFP_KERNEL);
1430 if (!text_in) {
1431 isert_err("Unable to allocate text_in of payload_length: %u\n",
1432 payload_length);
1433 return -ENOMEM;
1434 }
adb54c29
NB
1435 }
1436 cmd->text_in_ptr = text_in;
1437
1438 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1439
1440 return iscsit_process_text_cmd(conn, cmd, hdr);
1441}
1442
b8d26b3b
NB
1443static int
1444isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1445 uint32_t read_stag, uint64_t read_va,
1446 uint32_t write_stag, uint64_t write_va)
1447{
1448 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1449 struct iscsi_conn *conn = isert_conn->conn;
1450 struct iscsi_cmd *cmd;
1451 struct isert_cmd *isert_cmd;
1452 int ret = -EINVAL;
1453 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1454
fb140271 1455 if (conn->sess->sess_ops->SessionType &&
ca40d24e 1456 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
24f412dd 1457 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
4c22e07f 1458 " ignoring\n", opcode);
ca40d24e
NB
1459 return 0;
1460 }
1461
b8d26b3b
NB
1462 switch (opcode) {
1463 case ISCSI_OP_SCSI_CMD:
676687c6 1464 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1465 if (!cmd)
1466 break;
1467
d703ce2f 1468 isert_cmd = iscsit_priv_cmd(cmd);
b8d26b3b
NB
1469 isert_cmd->read_stag = read_stag;
1470 isert_cmd->read_va = read_va;
1471 isert_cmd->write_stag = write_stag;
1472 isert_cmd->write_va = write_va;
1473
d703ce2f 1474 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
b8d26b3b
NB
1475 rx_desc, (unsigned char *)hdr);
1476 break;
1477 case ISCSI_OP_NOOP_OUT:
676687c6 1478 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1479 if (!cmd)
1480 break;
1481
d703ce2f
NB
1482 isert_cmd = iscsit_priv_cmd(cmd);
1483 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
778de368 1484 rx_desc, (unsigned char *)hdr);
b8d26b3b
NB
1485 break;
1486 case ISCSI_OP_SCSI_DATA_OUT:
1487 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1488 (unsigned char *)hdr);
1489 break;
1490 case ISCSI_OP_SCSI_TMFUNC:
676687c6 1491 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1492 if (!cmd)
1493 break;
1494
1495 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1496 (unsigned char *)hdr);
1497 break;
1498 case ISCSI_OP_LOGOUT:
676687c6 1499 cmd = isert_allocate_cmd(conn);
b8d26b3b
NB
1500 if (!cmd)
1501 break;
1502
1503 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
b8d26b3b 1504 break;
adb54c29 1505 case ISCSI_OP_TEXT:
e4f4e801
SG
1506 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1507 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1508 if (!cmd)
1509 break;
1510 } else {
1511 cmd = isert_allocate_cmd(conn);
1512 if (!cmd)
1513 break;
1514 }
adb54c29 1515
d703ce2f
NB
1516 isert_cmd = iscsit_priv_cmd(cmd);
1517 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
adb54c29
NB
1518 rx_desc, (struct iscsi_text *)hdr);
1519 break;
b8d26b3b 1520 default:
24f412dd 1521 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
b8d26b3b
NB
1522 dump_stack();
1523 break;
1524 }
1525
1526 return ret;
1527}
1528
1529static void
1530isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1531{
1532 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1533 uint64_t read_va = 0, write_va = 0;
1534 uint32_t read_stag = 0, write_stag = 0;
1535 int rc;
1536
1537 switch (iser_hdr->flags & 0xF0) {
1538 case ISCSI_CTRL:
1539 if (iser_hdr->flags & ISER_RSV) {
1540 read_stag = be32_to_cpu(iser_hdr->read_stag);
1541 read_va = be64_to_cpu(iser_hdr->read_va);
4c22e07f
SG
1542 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1543 read_stag, (unsigned long long)read_va);
b8d26b3b
NB
1544 }
1545 if (iser_hdr->flags & ISER_WSV) {
1546 write_stag = be32_to_cpu(iser_hdr->write_stag);
1547 write_va = be64_to_cpu(iser_hdr->write_va);
4c22e07f
SG
1548 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1549 write_stag, (unsigned long long)write_va);
b8d26b3b
NB
1550 }
1551
24f412dd 1552 isert_dbg("ISER ISCSI_CTRL PDU\n");
b8d26b3b
NB
1553 break;
1554 case ISER_HELLO:
24f412dd 1555 isert_err("iSER Hello message\n");
b8d26b3b
NB
1556 break;
1557 default:
24f412dd 1558 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
b8d26b3b
NB
1559 break;
1560 }
1561
1562 rc = isert_rx_opcode(isert_conn, rx_desc,
1563 read_stag, read_va, write_stag, write_va);
1564}
1565
/*
 * Receive-completion handler.  Distinguishes the dedicated login RX
 * buffer from ring descriptors, syncs the buffer for CPU access, routes
 * the PDU, and tops up the RX ring when the number of posted buffers
 * drops low enough (ring descriptors only — the login buffer is
 * reposted explicitly by the login path).
 */
static void
isert_rcv_completion(struct iser_rx_desc *desc,
		     struct isert_conn *isert_conn,
		     u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			/* first request is consumed via login_req_comp instead */
			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	isert_dbg("Decremented post_recv_buf_count: %d\n",
		  isert_conn->post_recv_buf_count);

	/* the login buffer is not part of the RX ring; nothing to top up */
	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			isert_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
1631
e3d7e4c3
SG
/*
 * DMA-map a command's scatterlist window starting at @offset for the
 * given RDMA direction.  NOTE(review): both nents and total length are
 * silently capped at ISCSI_ISER_SG_TABLESIZE (pages) — callers appear
 * to rely on partial mapping plus re-issue; confirm before changing.
 * Returns 0 on success, -EINVAL if the mapping fails.
 */
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			  DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}
1664
1665static void
1666isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1667{
1668 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1669
1670 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1671 memset(data, 0, sizeof(*data));
1672}
1673
1674
1675
b8d26b3b
NB
1676static void
1677isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1678{
1679 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
b8d26b3b 1680
4c22e07f 1681 isert_dbg("Cmd %p\n", isert_cmd);
e3d7e4c3
SG
1682
1683 if (wr->data.sg) {
4c22e07f 1684 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
e3d7e4c3 1685 isert_unmap_data_buf(isert_conn, &wr->data);
b8d26b3b
NB
1686 }
1687
90ecc6e2 1688 if (wr->send_wr) {
4c22e07f 1689 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
90ecc6e2
VP
1690 kfree(wr->send_wr);
1691 wr->send_wr = NULL;
1692 }
b8d26b3b 1693
90ecc6e2 1694 if (wr->ib_sge) {
4c22e07f 1695 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
90ecc6e2
VP
1696 kfree(wr->ib_sge);
1697 wr->ib_sge = NULL;
1698 }
b8d26b3b
NB
1699}
1700
/*
 * Release the RDMA resources of a command on the fastreg path: return
 * the fast-registration descriptor to the connection pool (unmapping
 * the protection buffer first if DIF was in use) and unmap the data
 * buffer.  send_wr/ib_sge are pool-owned here, so only the pointers
 * are cleared.
 */
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		/* conn_lock protects the shared fastreg descriptor pool */
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}
1728
/*
 * Final put of a command: remove it from the connection list, release
 * its RDMA resources via the device's unreg callback, and hand it back
 * to the appropriate core release path based on its iSCSI opcode.
 * @comp_err marks completion-with-error teardown, which needs an extra
 * se_cmd reference drop for in-flight WRITEs.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
1806
1807static void
1808isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1809{
1810 if (tx_desc->dma_addr != 0) {
4c22e07f 1811 isert_dbg("unmap single for tx_desc->dma_addr\n");
b8d26b3b
NB
1812 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1813 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1814 tx_desc->dma_addr = 0;
1815 }
1816}
1817
1818static void
1819isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
03e7848a 1820 struct ib_device *ib_dev, bool comp_err)
b8d26b3b 1821{
dbbc5d11 1822 if (isert_cmd->pdu_buf_dma != 0) {
4c22e07f 1823 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
dbbc5d11
NB
1824 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1825 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1826 isert_cmd->pdu_buf_dma = 0;
b8d26b3b
NB
1827 }
1828
1829 isert_unmap_tx_desc(tx_desc, ib_dev);
03e7848a 1830 isert_put_cmd(isert_cmd, comp_err);
b8d26b3b
NB
1831}
1832
96b7973e
SG
/*
 * Query the signature MR for T10-PI (DIF) errors after an RDMA data
 * transfer.  On a detected error the sense key and failing sector are
 * recorded on the se_cmd.  Returns 0 if clean, 1 on a PI error, or the
 * negative error from ib_check_mr_status().
 */
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		/* +8: each logical block carries an 8-byte DIF tuple on the wire */
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		/* translate byte offset of the failure into an absolute LBA */
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
1876
f93f3a70
SG
/*
 * RDMA_WRITE (READ data-in) completion: verify T10-PI status when the
 * transfer was protected, release the RDMA resources, and either send
 * the SCSI response or a check condition on a PI failure.
 */
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	/* the RDMA WRs are done; the response path posts its own WR */
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}
1902
b8d26b3b
NB
/*
 * RDMA_READ (WRITE data-out) completion: verify T10-PI status when the
 * transfer was protected, release the RDMA resources, mark all write
 * data received, and either execute the command or fail it with a
 * check condition on a PI error.
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		/* drop the reference execute_cmd would otherwise consume */
		target_put_sess_cmd(se_cmd->se_sess, se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
1939
/*
 * Workqueue handler that finishes control-PDU send completions outside
 * interrupt context (TMR/reject/text responses and logout).
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* fallthrough: TMR responses are released like other control PDUs */
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
1969
1970static void
1971isert_response_completion(struct iser_tx_desc *tx_desc,
1972 struct isert_cmd *isert_cmd,
1973 struct isert_conn *isert_conn,
1974 struct ib_device *ib_dev)
1975{
d703ce2f 1976 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
b8d26b3b
NB
1977
1978 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
3df8f68a 1979 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
adb54c29
NB
1980 cmd->i_state == ISTATE_SEND_REJECT ||
1981 cmd->i_state == ISTATE_SEND_TEXTRSP) {
b8d26b3b
NB
1982 isert_unmap_tx_desc(tx_desc, ib_dev);
1983
1984 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1985 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1986 return;
1987 }
897bb2c9 1988
b8d26b3b 1989 cmd->i_state = ISTATE_SENT_STATUS;
03e7848a 1990 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
b8d26b3b
NB
1991}
1992
1993static void
7748681b 1994isert_snd_completion(struct iser_tx_desc *tx_desc,
68a86dee 1995 struct isert_conn *isert_conn)
b8d26b3b
NB
1996{
1997 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1998 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1999 struct isert_rdma_wr *wr;
2000
2001 if (!isert_cmd) {
b8d26b3b
NB
2002 isert_unmap_tx_desc(tx_desc, ib_dev);
2003 return;
2004 }
2005 wr = &isert_cmd->rdma_wr;
2006
4c22e07f
SG
2007 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
2008
b8d26b3b 2009 switch (wr->iser_ib_op) {
b8d26b3b 2010 case ISER_IB_SEND:
b8d26b3b
NB
2011 isert_response_completion(tx_desc, isert_cmd,
2012 isert_conn, ib_dev);
2013 break;
2014 case ISER_IB_RDMA_WRITE:
f93f3a70 2015 isert_completion_rdma_write(tx_desc, isert_cmd);
b8d26b3b
NB
2016 break;
2017 case ISER_IB_RDMA_READ:
b8d26b3b
NB
2018 isert_completion_rdma_read(tx_desc, isert_cmd);
2019 break;
2020 default:
4c22e07f 2021 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
b8d26b3b
NB
2022 dump_stack();
2023 break;
2024 }
2025}
2026
6f0fae3d
SG
2027/**
2028 * is_isert_tx_desc() - Indicate if the completion wr_id
2029 * is a TX descriptor or not.
2030 * @isert_conn: iser connection
2031 * @wr_id: completion WR identifier
2032 *
2033 * Since we cannot rely on wc opcode in FLUSH errors
2034 * we must work around it by checking if the wr_id address
2035 * falls in the iser connection rx_descs buffer. If so
2036 * it is an RX descriptor, otherwize it is a TX.
2037 */
2038static inline bool
2039is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
2040{
2041 void *start = isert_conn->conn_rx_descs;
2042 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
2043
2044 if (wr_id >= start && wr_id < start + len)
2045 return false;
2046
2047 return true;
2048}
2049
b8d26b3b 2050static void
6f0fae3d 2051isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
b8d26b3b 2052{
bdf20e72 2053 if (wc->wr_id == ISER_BEACON_WRID) {
24f412dd 2054 isert_info("conn %p completing conn_wait_comp_err\n",
bdf20e72
SG
2055 isert_conn);
2056 complete(&isert_conn->conn_wait_comp_err);
ed4520ae 2057 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
df43debd
SG
2058 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2059 struct isert_cmd *isert_cmd;
6f0fae3d 2060 struct iser_tx_desc *desc;
df43debd 2061
6f0fae3d
SG
2062 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2063 isert_cmd = desc->isert_cmd;
df43debd
SG
2064 if (!isert_cmd)
2065 isert_unmap_tx_desc(desc, ib_dev);
2066 else
2067 isert_completion_put(desc, isert_cmd, ib_dev, true);
df43debd
SG
2068 } else {
2069 isert_conn->post_recv_buf_count--;
bdf20e72
SG
2070 if (!isert_conn->post_recv_buf_count)
2071 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
df43debd 2072 }
b8d26b3b
NB
2073}
2074
2075static void
6f0fae3d 2076isert_handle_wc(struct ib_wc *wc)
b8d26b3b 2077{
b8d26b3b
NB
2078 struct isert_conn *isert_conn;
2079 struct iser_tx_desc *tx_desc;
6f0fae3d 2080 struct iser_rx_desc *rx_desc;
b8d26b3b 2081
6f0fae3d
SG
2082 isert_conn = wc->qp->qp_context;
2083 if (likely(wc->status == IB_WC_SUCCESS)) {
2084 if (wc->opcode == IB_WC_RECV) {
2085 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
7748681b 2086 isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
b8d26b3b 2087 } else {
6f0fae3d 2088 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
7748681b 2089 isert_snd_completion(tx_desc, isert_conn);
b8d26b3b 2090 }
6f0fae3d
SG
2091 } else {
2092 if (wc->status != IB_WC_WR_FLUSH_ERR)
24f412dd 2093 isert_err("wr id %llx status %d vend_err %x\n",
6f0fae3d
SG
2094 wc->wr_id, wc->status, wc->vendor_err);
2095 else
24f412dd 2096 isert_dbg("flush error: wr id %llx\n", wc->wr_id);
b8d26b3b 2097
6f0fae3d
SG
2098 if (wc->wr_id != ISER_FASTREG_LI_WRID)
2099 isert_cq_comp_err(isert_conn, wc);
2100 }
b8d26b3b
NB
2101}
2102
2103static void
6f0fae3d 2104isert_cq_work(struct work_struct *work)
b8d26b3b 2105{
37d9fe80 2106 enum { isert_poll_budget = 65536 };
4a295bae 2107 struct isert_comp *comp = container_of(work, struct isert_comp,
6f0fae3d 2108 work);
36ea63b5
SG
2109 struct ib_wc *const wcs = comp->wcs;
2110 int i, n, completed = 0;
b8d26b3b 2111
36ea63b5
SG
2112 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
2113 for (i = 0; i < n; i++)
2114 isert_handle_wc(&wcs[i]);
b8d26b3b 2115
36ea63b5
SG
2116 completed += n;
2117 if (completed >= isert_poll_budget)
37d9fe80
SG
2118 break;
2119 }
2120
6f0fae3d 2121 ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
b8d26b3b
NB
2122}
2123
2124static void
6f0fae3d 2125isert_cq_callback(struct ib_cq *cq, void *context)
b8d26b3b 2126{
4a295bae 2127 struct isert_comp *comp = context;
b8d26b3b 2128
6f0fae3d 2129 queue_work(isert_comp_wq, &comp->work);
b8d26b3b
NB
2130}
2131
2132static int
2133isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2134{
2135 struct ib_send_wr *wr_failed;
2136 int ret;
2137
b8d26b3b
NB
2138 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2139 &wr_failed);
2140 if (ret) {
24f412dd 2141 isert_err("ib_post_send failed with %d\n", ret);
b8d26b3b
NB
2142 return ret;
2143 }
2144 return ret;
2145}
2146
2147static int
2148isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2149{
d703ce2f 2150 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2151 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2152 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2153 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
2154 &isert_cmd->tx_desc.iscsi_header;
2155
2156 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2157 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
2158 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2159 /*
2160 * Attach SENSE DATA payload to iSCSI Response PDU
2161 */
2162 if (cmd->se_cmd.sense_buffer &&
2163 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
2164 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
67cb3949
SG
2165 struct isert_device *device = isert_conn->conn_device;
2166 struct ib_device *ib_dev = device->ib_device;
b8d26b3b 2167 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
dbbc5d11 2168 u32 padding, pdu_len;
b8d26b3b
NB
2169
2170 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
2171 cmd->sense_buffer);
2172 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
2173
2174 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
2175 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
dbbc5d11 2176 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
b8d26b3b 2177
dbbc5d11
NB
2178 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2179 (void *)cmd->sense_buffer, pdu_len,
b8d26b3b
NB
2180 DMA_TO_DEVICE);
2181
dbbc5d11
NB
2182 isert_cmd->pdu_buf_len = pdu_len;
2183 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2184 tx_dsg->length = pdu_len;
67cb3949 2185 tx_dsg->lkey = device->mr->lkey;
b8d26b3b
NB
2186 isert_cmd->tx_desc.num_sge = 2;
2187 }
2188
68a86dee 2189 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2190
4c22e07f 2191 isert_dbg("Posting SCSI Response\n");
b8d26b3b
NB
2192
2193 return isert_post_response(isert_conn, isert_cmd);
2194}
2195
131e6abc
NB
2196static void
2197isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2198{
2199 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2200 struct isert_conn *isert_conn = conn->context;
131e6abc
NB
2201 struct isert_device *device = isert_conn->conn_device;
2202
2203 spin_lock_bh(&conn->cmd_lock);
2204 if (!list_empty(&cmd->i_conn_node))
2205 list_del_init(&cmd->i_conn_node);
2206 spin_unlock_bh(&conn->cmd_lock);
2207
2208 if (cmd->data_direction == DMA_TO_DEVICE)
2209 iscsit_stop_dataout_timer(cmd);
2210
2211 device->unreg_rdma_mem(isert_cmd, isert_conn);
2212}
2213
e70beee7
NB
2214static enum target_prot_op
2215isert_get_sup_prot_ops(struct iscsi_conn *conn)
2216{
6700425e 2217 struct isert_conn *isert_conn = conn->context;
e70beee7
NB
2218 struct isert_device *device = isert_conn->conn_device;
2219
23a548ee
SG
2220 if (conn->tpg->tpg_attrib.t10_pi) {
2221 if (device->pi_capable) {
24f412dd 2222 isert_info("conn %p PI offload enabled\n", isert_conn);
23a548ee
SG
2223 isert_conn->pi_support = true;
2224 return TARGET_PROT_ALL;
2225 }
2226 }
2227
24f412dd 2228 isert_info("conn %p PI offload disabled\n", isert_conn);
23a548ee 2229 isert_conn->pi_support = false;
e70beee7
NB
2230
2231 return TARGET_PROT_NORMAL;
2232}
2233
b8d26b3b
NB
2234static int
2235isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2236 bool nopout_response)
2237{
d703ce2f 2238 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2239 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2240 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2241
2242 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2243 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2244 &isert_cmd->tx_desc.iscsi_header,
2245 nopout_response);
2246 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
68a86dee 2247 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2248
4c22e07f 2249 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
b8d26b3b
NB
2250
2251 return isert_post_response(isert_conn, isert_cmd);
2252}
2253
2254static int
2255isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2256{
d703ce2f 2257 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2258 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2259 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2260
2261 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2262 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2263 &isert_cmd->tx_desc.iscsi_header);
2264 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
68a86dee 2265 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2266
4c22e07f 2267 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
b8d26b3b
NB
2268
2269 return isert_post_response(isert_conn, isert_cmd);
2270}
2271
2272static int
2273isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2274{
d703ce2f 2275 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2276 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2277 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2278
2279 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2280 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2281 &isert_cmd->tx_desc.iscsi_header);
2282 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
68a86dee 2283 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2284
4c22e07f 2285 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
b8d26b3b
NB
2286
2287 return isert_post_response(isert_conn, isert_cmd);
2288}
2289
2290static int
2291isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2292{
d703ce2f 2293 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2294 struct isert_conn *isert_conn = conn->context;
b8d26b3b 2295 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
67cb3949
SG
2296 struct isert_device *device = isert_conn->conn_device;
2297 struct ib_device *ib_dev = device->ib_device;
3df8f68a
NB
2298 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2299 struct iscsi_reject *hdr =
2300 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
b8d26b3b
NB
2301
2302 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
3df8f68a 2303 iscsit_build_reject(cmd, conn, hdr);
b8d26b3b 2304 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
3df8f68a
NB
2305
2306 hton24(hdr->dlength, ISCSI_HDR_LEN);
dbbc5d11 2307 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
3df8f68a
NB
2308 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2309 DMA_TO_DEVICE);
dbbc5d11
NB
2310 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2311 tx_dsg->addr = isert_cmd->pdu_buf_dma;
3df8f68a 2312 tx_dsg->length = ISCSI_HDR_LEN;
67cb3949 2313 tx_dsg->lkey = device->mr->lkey;
3df8f68a
NB
2314 isert_cmd->tx_desc.num_sge = 2;
2315
68a86dee 2316 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
b8d26b3b 2317
4c22e07f 2318 isert_dbg("conn %p Posting Reject\n", isert_conn);
b8d26b3b
NB
2319
2320 return isert_post_response(isert_conn, isert_cmd);
2321}
2322
adb54c29
NB
2323static int
2324isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2325{
d703ce2f 2326 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2327 struct isert_conn *isert_conn = conn->context;
adb54c29
NB
2328 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2329 struct iscsi_text_rsp *hdr =
2330 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2331 u32 txt_rsp_len;
2332 int rc;
2333
2334 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
22c7aaa5 2335 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
adb54c29
NB
2336 if (rc < 0)
2337 return rc;
2338
2339 txt_rsp_len = rc;
2340 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2341
2342 if (txt_rsp_len) {
67cb3949
SG
2343 struct isert_device *device = isert_conn->conn_device;
2344 struct ib_device *ib_dev = device->ib_device;
adb54c29
NB
2345 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2346 void *txt_rsp_buf = cmd->buf_ptr;
2347
2348 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2349 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2350
2351 isert_cmd->pdu_buf_len = txt_rsp_len;
2352 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2353 tx_dsg->length = txt_rsp_len;
67cb3949 2354 tx_dsg->lkey = device->mr->lkey;
adb54c29
NB
2355 isert_cmd->tx_desc.num_sge = 2;
2356 }
68a86dee 2357 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
adb54c29 2358
f64d2792 2359 isert_dbg("conn %p Text Response\n", isert_conn);
adb54c29
NB
2360
2361 return isert_post_response(isert_conn, isert_cmd);
2362}
2363
b8d26b3b
NB
2364static int
2365isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2366 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2367 u32 data_left, u32 offset)
2368{
d703ce2f 2369 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
b8d26b3b 2370 struct scatterlist *sg_start, *tmp_sg;
67cb3949
SG
2371 struct isert_device *device = isert_conn->conn_device;
2372 struct ib_device *ib_dev = device->ib_device;
b8d26b3b
NB
2373 u32 sg_off, page_off;
2374 int i = 0, sg_nents;
2375
2376 sg_off = offset / PAGE_SIZE;
2377 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2378 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2379 page_off = offset % PAGE_SIZE;
2380
2381 send_wr->sg_list = ib_sge;
2382 send_wr->num_sge = sg_nents;
b0a191e7 2383 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
b8d26b3b
NB
2384 /*
2385 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
2386 */
2387 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
4c22e07f
SG
2388 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
2389 "page_off: %u\n",
2390 (unsigned long long)tmp_sg->dma_address,
2391 tmp_sg->length, page_off);
b8d26b3b
NB
2392
2393 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2394 ib_sge->length = min_t(u32, data_left,
2395 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
67cb3949 2396 ib_sge->lkey = device->mr->lkey;
b8d26b3b 2397
4c22e07f
SG
2398 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
2399 ib_sge->addr, ib_sge->length, ib_sge->lkey);
b8d26b3b
NB
2400 page_off = 0;
2401 data_left -= ib_sge->length;
2402 ib_sge++;
24f412dd 2403 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
b8d26b3b
NB
2404 }
2405
24f412dd 2406 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
4c22e07f 2407 send_wr->sg_list, send_wr->num_sge);
b8d26b3b
NB
2408
2409 return sg_nents;
2410}
2411
2412static int
90ecc6e2
VP
2413isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2414 struct isert_rdma_wr *wr)
b8d26b3b
NB
2415{
2416 struct se_cmd *se_cmd = &cmd->se_cmd;
d703ce2f 2417 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
6700425e 2418 struct isert_conn *isert_conn = conn->context;
e3d7e4c3 2419 struct isert_data_buf *data = &wr->data;
90ecc6e2 2420 struct ib_send_wr *send_wr;
b8d26b3b 2421 struct ib_sge *ib_sge;
e3d7e4c3
SG
2422 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2423 int ret = 0, i, ib_sge_cnt;
90ecc6e2 2424
e3d7e4c3 2425 isert_cmd->tx_desc.isert_cmd = isert_cmd;
b8d26b3b 2426
e3d7e4c3
SG
2427 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2428 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2429 se_cmd->t_data_nents, se_cmd->data_length,
2430 offset, wr->iser_ib_op, &wr->data);
2431 if (ret)
2432 return ret;
b8d26b3b 2433
e3d7e4c3
SG
2434 data_left = data->len;
2435 offset = data->offset;
b8d26b3b 2436
e3d7e4c3 2437 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
b8d26b3b 2438 if (!ib_sge) {
24f412dd 2439 isert_warn("Unable to allocate ib_sge\n");
b8d26b3b 2440 ret = -ENOMEM;
e3d7e4c3 2441 goto unmap_cmd;
b8d26b3b 2442 }
90ecc6e2 2443 wr->ib_sge = ib_sge;
b8d26b3b 2444
e3d7e4c3 2445 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
b8d26b3b
NB
2446 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2447 GFP_KERNEL);
2448 if (!wr->send_wr) {
24f412dd 2449 isert_dbg("Unable to allocate wr->send_wr\n");
b8d26b3b 2450 ret = -ENOMEM;
e3d7e4c3 2451 goto unmap_cmd;
b8d26b3b 2452 }
b8d26b3b
NB
2453
2454 wr->isert_cmd = isert_cmd;
2455 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
b8d26b3b
NB
2456
2457 for (i = 0; i < wr->send_wr_num; i++) {
2458 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2459 data_len = min(data_left, rdma_write_max);
2460
b8d26b3b 2461 send_wr->send_flags = 0;
90ecc6e2
VP
2462 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2463 send_wr->opcode = IB_WR_RDMA_WRITE;
2464 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2465 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2466 if (i + 1 == wr->send_wr_num)
2467 send_wr->next = &isert_cmd->tx_desc.send_wr;
2468 else
2469 send_wr->next = &wr->send_wr[i + 1];
2470 } else {
2471 send_wr->opcode = IB_WR_RDMA_READ;
2472 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2473 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2474 if (i + 1 == wr->send_wr_num)
2475 send_wr->send_flags = IB_SEND_SIGNALED;
2476 else
2477 send_wr->next = &wr->send_wr[i + 1];
2478 }
b8d26b3b
NB
2479
2480 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2481 send_wr, data_len, offset);
2482 ib_sge += ib_sge_cnt;
2483
b8d26b3b 2484 offset += data_len;
90ecc6e2 2485 va_offset += data_len;
b8d26b3b
NB
2486 data_left -= data_len;
2487 }
90ecc6e2
VP
2488
2489 return 0;
e3d7e4c3
SG
2490unmap_cmd:
2491 isert_unmap_data_buf(isert_conn, data);
2492
90ecc6e2
VP
2493 return ret;
2494}
2495
59464ef4
VP
2496static int
2497isert_map_fr_pagelist(struct ib_device *ib_dev,
2498 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2499{
2500 u64 start_addr, end_addr, page, chunk_start = 0;
2501 struct scatterlist *tmp_sg;
2502 int i = 0, new_chunk, last_ent, n_pages;
2503
2504 n_pages = 0;
2505 new_chunk = 1;
2506 last_ent = sg_nents - 1;
2507 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2508 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2509 if (new_chunk)
2510 chunk_start = start_addr;
2511 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2512
4c22e07f
SG
2513 isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
2514 i, (unsigned long long)tmp_sg->dma_address,
2515 tmp_sg->length);
59464ef4
VP
2516
2517 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2518 new_chunk = 0;
2519 continue;
2520 }
2521 new_chunk = 1;
2522
2523 page = chunk_start & PAGE_MASK;
2524 do {
2525 fr_pl[n_pages++] = page;
4c22e07f
SG
2526 isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
2527 n_pages - 1, page);
59464ef4
VP
2528 page += PAGE_SIZE;
2529 } while (page < end_addr);
2530 }
2531
2532 return n_pages;
2533}
2534
10633c37
SG
2535static inline void
2536isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
2537{
2538 u32 rkey;
2539
2540 memset(inv_wr, 0, sizeof(*inv_wr));
2541 inv_wr->wr_id = ISER_FASTREG_LI_WRID;
2542 inv_wr->opcode = IB_WR_LOCAL_INV;
2543 inv_wr->ex.invalidate_rkey = mr->rkey;
2544
2545 /* Bump the key */
2546 rkey = ib_inc_rkey(mr->rkey);
2547 ib_update_fast_reg_key(mr, rkey);
2548}
2549
59464ef4 2550static int
e3d7e4c3
SG
2551isert_fast_reg_mr(struct isert_conn *isert_conn,
2552 struct fast_reg_descriptor *fr_desc,
2553 struct isert_data_buf *mem,
9e961ae7 2554 enum isert_indicator ind,
e3d7e4c3 2555 struct ib_sge *sge)
59464ef4 2556{
67cb3949
SG
2557 struct isert_device *device = isert_conn->conn_device;
2558 struct ib_device *ib_dev = device->ib_device;
9e961ae7
SG
2559 struct ib_mr *mr;
2560 struct ib_fast_reg_page_list *frpl;
59464ef4
VP
2561 struct ib_send_wr fr_wr, inv_wr;
2562 struct ib_send_wr *bad_wr, *wr = NULL;
9bd626e7
SG
2563 int ret, pagelist_len;
2564 u32 page_off;
59464ef4 2565
e3d7e4c3 2566 if (mem->dma_nents == 1) {
67cb3949 2567 sge->lkey = device->mr->lkey;
e3d7e4c3
SG
2568 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2569 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
4c22e07f
SG
2570 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2571 sge->addr, sge->length, sge->lkey);
e3d7e4c3
SG
2572 return 0;
2573 }
2574
9e961ae7
SG
2575 if (ind == ISERT_DATA_KEY_VALID) {
2576 /* Registering data buffer */
2577 mr = fr_desc->data_mr;
2578 frpl = fr_desc->data_frpl;
2579 } else {
2580 /* Registering protection buffer */
2581 mr = fr_desc->pi_ctx->prot_mr;
2582 frpl = fr_desc->pi_ctx->prot_frpl;
2583 }
2584
e3d7e4c3 2585 page_off = mem->offset % PAGE_SIZE;
59464ef4 2586
24f412dd 2587 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
4c22e07f 2588 fr_desc, mem->nents, mem->offset);
59464ef4 2589
e3d7e4c3 2590 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
9e961ae7 2591 &frpl->page_list[0]);
59464ef4 2592
10633c37
SG
2593 if (!(fr_desc->ind & ind)) {
2594 isert_inv_rkey(&inv_wr, mr);
59464ef4 2595 wr = &inv_wr;
59464ef4
VP
2596 }
2597
2598 /* Prepare FASTREG WR */
2599 memset(&fr_wr, 0, sizeof(fr_wr));
9bb4ca68 2600 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
59464ef4 2601 fr_wr.opcode = IB_WR_FAST_REG_MR;
9e961ae7
SG
2602 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2603 fr_wr.wr.fast_reg.page_list = frpl;
59464ef4
VP
2604 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2605 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
e3d7e4c3 2606 fr_wr.wr.fast_reg.length = mem->len;
9e961ae7 2607 fr_wr.wr.fast_reg.rkey = mr->rkey;
59464ef4
VP
2608 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2609
2610 if (!wr)
2611 wr = &fr_wr;
2612 else
2613 wr->next = &fr_wr;
2614
2615 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2616 if (ret) {
24f412dd 2617 isert_err("fast registration failed, ret:%d\n", ret);
59464ef4
VP
2618 return ret;
2619 }
9e961ae7 2620 fr_desc->ind &= ~ind;
59464ef4 2621
9e961ae7
SG
2622 sge->lkey = mr->lkey;
2623 sge->addr = frpl->page_list[0] + page_off;
e3d7e4c3 2624 sge->length = mem->len;
59464ef4 2625
4c22e07f
SG
2626 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2627 sge->addr, sge->length, sge->lkey);
9e961ae7
SG
2628
2629 return ret;
2630}
2631
3d73cf1a
SG
2632static inline void
2633isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2634 struct ib_sig_domain *domain)
2635{
78eda2bb 2636 domain->sig_type = IB_SIG_TYPE_T10_DIF;
3d73cf1a
SG
2637 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2638 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2639 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
78eda2bb
SG
2640 /*
2641 * At the moment we hard code those, but if in the future
2642 * the target core would like to use it, we will take it
2643 * from se_cmd.
2644 */
2645 domain->sig.dif.apptag_check_mask = 0xffff;
2646 domain->sig.dif.app_escape = true;
2647 domain->sig.dif.ref_escape = true;
2648 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2649 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2650 domain->sig.dif.ref_remap = true;
3d73cf1a
SG
2651};
2652
9e961ae7
SG
2653static int
2654isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2655{
9e961ae7
SG
2656 switch (se_cmd->prot_op) {
2657 case TARGET_PROT_DIN_INSERT:
2658 case TARGET_PROT_DOUT_STRIP:
78eda2bb 2659 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
3d73cf1a 2660 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
9e961ae7
SG
2661 break;
2662 case TARGET_PROT_DOUT_INSERT:
2663 case TARGET_PROT_DIN_STRIP:
78eda2bb 2664 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
3d73cf1a 2665 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
9e961ae7
SG
2666 break;
2667 case TARGET_PROT_DIN_PASS:
2668 case TARGET_PROT_DOUT_PASS:
3d73cf1a
SG
2669 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2670 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
9e961ae7
SG
2671 break;
2672 default:
24f412dd 2673 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
9e961ae7
SG
2674 return -EINVAL;
2675 }
2676
2677 return 0;
2678}
2679
2680static inline u8
2681isert_set_prot_checks(u8 prot_checks)
2682{
2683 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2684 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2685 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2686}
2687
2688static int
570db170
SG
2689isert_reg_sig_mr(struct isert_conn *isert_conn,
2690 struct se_cmd *se_cmd,
2691 struct isert_rdma_wr *rdma_wr,
2692 struct fast_reg_descriptor *fr_desc)
9e961ae7
SG
2693{
2694 struct ib_send_wr sig_wr, inv_wr;
2695 struct ib_send_wr *bad_wr, *wr = NULL;
2696 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2697 struct ib_sig_attrs sig_attrs;
2698 int ret;
9e961ae7
SG
2699
2700 memset(&sig_attrs, 0, sizeof(sig_attrs));
2701 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2702 if (ret)
2703 goto err;
59464ef4 2704
9e961ae7
SG
2705 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2706
2707 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
10633c37 2708 isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
9e961ae7 2709 wr = &inv_wr;
9e961ae7
SG
2710 }
2711
2712 memset(&sig_wr, 0, sizeof(sig_wr));
2713 sig_wr.opcode = IB_WR_REG_SIG_MR;
c2caa207 2714 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
570db170 2715 sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
9e961ae7
SG
2716 sig_wr.num_sge = 1;
2717 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2718 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2719 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2720 if (se_cmd->t_prot_sg)
570db170 2721 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
9e961ae7
SG
2722
2723 if (!wr)
2724 wr = &sig_wr;
2725 else
2726 wr->next = &sig_wr;
2727
2728 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2729 if (ret) {
24f412dd 2730 isert_err("fast registration failed, ret:%d\n", ret);
9e961ae7
SG
2731 goto err;
2732 }
2733 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2734
570db170
SG
2735 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2736 rdma_wr->ib_sg[SIG].addr = 0;
2737 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
9e961ae7
SG
2738 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2739 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2740 /*
2741 * We have protection guards on the wire
2742 * so we need to set a larget transfer
2743 */
570db170 2744 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
9e961ae7 2745
24f412dd 2746 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
570db170
SG
2747 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2748 rdma_wr->ib_sg[SIG].lkey);
9e961ae7 2749err:
59464ef4
VP
2750 return ret;
2751}
2752
570db170
SG
2753static int
2754isert_handle_prot_cmd(struct isert_conn *isert_conn,
2755 struct isert_cmd *isert_cmd,
2756 struct isert_rdma_wr *wr)
2757{
2758 struct isert_device *device = isert_conn->conn_device;
2759 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2760 int ret;
2761
2762 if (!wr->fr_desc->pi_ctx) {
2763 ret = isert_create_pi_ctx(wr->fr_desc,
2764 device->ib_device,
67cb3949 2765 device->pd);
570db170 2766 if (ret) {
24f412dd 2767 isert_err("conn %p failed to allocate pi_ctx\n",
570db170
SG
2768 isert_conn);
2769 return ret;
2770 }
2771 }
2772
2773 if (se_cmd->t_prot_sg) {
2774 ret = isert_map_data_buf(isert_conn, isert_cmd,
2775 se_cmd->t_prot_sg,
2776 se_cmd->t_prot_nents,
2777 se_cmd->prot_length,
2778 0, wr->iser_ib_op, &wr->prot);
2779 if (ret) {
24f412dd 2780 isert_err("conn %p failed to map protection buffer\n",
570db170
SG
2781 isert_conn);
2782 return ret;
2783 }
2784
2785 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2786 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2787 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2788 if (ret) {
24f412dd 2789 isert_err("conn %p failed to fast reg mr\n",
570db170
SG
2790 isert_conn);
2791 goto unmap_prot_cmd;
2792 }
2793 }
2794
2795 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2796 if (ret) {
24f412dd 2797 isert_err("conn %p failed to fast reg mr\n",
570db170
SG
2798 isert_conn);
2799 goto unmap_prot_cmd;
2800 }
2801 wr->fr_desc->ind |= ISERT_PROTECTED;
2802
2803 return 0;
2804
2805unmap_prot_cmd:
2806 if (se_cmd->t_prot_sg)
2807 isert_unmap_data_buf(isert_conn, &wr->prot);
2808
2809 return ret;
2810}
2811
/*
 * isert_reg_rdma() - prepare an RDMA READ/WRITE using fast memory registration.
 * @conn: iscsi connection
 * @cmd:  iscsi command whose data (and optionally protection) buffer is mapped
 * @wr:   per-command RDMA work-request state to fill in
 *
 * Maps the data scatterlist, grabs a fast-reg descriptor from the connection
 * pool when needed (multi-SGE data or a T10-PI protected command), registers
 * the buffer, and builds a single RDMA READ or WRITE send WR in
 * wr->s_send_wr.  On failure the descriptor is returned to the pool and the
 * data buffer unmapped.  Returns 0 on success, negative errno otherwise.
 */
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	/* RDMA READ (DataOut) resumes at write_data_done; WRITE starts at 0 */
	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	/*
	 * A fast-reg descriptor is only needed when the data cannot be
	 * described by a single SGE, or when signature (T10-PI) offload
	 * requires registration regardless.
	 */
	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	/* fr_desc may be NULL here; isert_fast_reg_mr() must handle that */
	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		/* protected command: use the signature-protected SGE */
		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		/*
		 * Non-PI WRITEs are unsignaled: completion is observed via
		 * the chained response send.  PI commands need the signaled
		 * completion to check signature status.
		 */
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		/* return the descriptor to the pool under the conn lock */
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
2895
/*
 * isert_put_datain() - post RDMA_WRITE(s) carrying READ data to the initiator.
 * @conn: iscsi connection
 * @cmd:  iscsi command with data to send
 *
 * Registers/maps the command data via the device-specific reg_rdma_mem()
 * callback.  For non-protected commands the iSCSI SCSI response PDU is built
 * and chained behind the RDMA_WRITE so both post in one ib_post_send();
 * T10-PI commands defer the response until signature status is verified.
 *
 * Returns 1 on success (response handled here), negative errno on failure.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		/* chain the response send behind the RDMA_WRITE WR */
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	/* 1 tells iscsit the response PDU was (or will be) sent by us */
	return 1;
}
2945
2946static int
2947isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2948{
2949 struct se_cmd *se_cmd = &cmd->se_cmd;
d703ce2f 2950 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
b8d26b3b 2951 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
6700425e 2952 struct isert_conn *isert_conn = conn->context;
d40945d8 2953 struct isert_device *device = isert_conn->conn_device;
90ecc6e2
VP
2954 struct ib_send_wr *wr_failed;
2955 int rc;
b8d26b3b 2956
24f412dd 2957 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
90ecc6e2 2958 isert_cmd, se_cmd->data_length, cmd->write_data_done);
b8d26b3b 2959 wr->iser_ib_op = ISER_IB_RDMA_READ;
d40945d8 2960 rc = device->reg_rdma_mem(conn, cmd, wr);
90ecc6e2 2961 if (rc) {
24f412dd 2962 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
90ecc6e2 2963 return rc;
b8d26b3b
NB
2964 }
2965
b8d26b3b 2966 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
bdf20e72 2967 if (rc)
24f412dd 2968 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
bdf20e72 2969
24f412dd 2970 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
90ecc6e2 2971 isert_cmd);
b8d26b3b 2972
90ecc6e2 2973 return 0;
b8d26b3b
NB
2974}
2975
2976static int
2977isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2978{
2979 int ret;
2980
2981 switch (state) {
2982 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2983 ret = isert_put_nopin(cmd, conn, false);
2984 break;
2985 default:
24f412dd 2986 isert_err("Unknown immediate state: 0x%02x\n", state);
b8d26b3b
NB
2987 ret = -EINVAL;
2988 break;
2989 }
2990
2991 return ret;
2992}
2993
2994static int
2995isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2996{
991bb764 2997 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
2998 int ret;
2999
3000 switch (state) {
3001 case ISTATE_SEND_LOGOUTRSP:
3002 ret = isert_put_logout_rsp(cmd, conn);
991bb764
SG
3003 if (!ret)
3004 isert_conn->logout_posted = true;
b8d26b3b
NB
3005 break;
3006 case ISTATE_SEND_NOPIN:
3007 ret = isert_put_nopin(cmd, conn, true);
3008 break;
3009 case ISTATE_SEND_TASKMGTRSP:
3010 ret = isert_put_tm_rsp(cmd, conn);
3011 break;
3012 case ISTATE_SEND_REJECT:
3013 ret = isert_put_reject(cmd, conn);
3014 break;
adb54c29
NB
3015 case ISTATE_SEND_TEXTRSP:
3016 ret = isert_put_text_rsp(cmd, conn);
3017 break;
b8d26b3b
NB
3018 case ISTATE_SEND_STATUS:
3019 /*
3020 * Special case for sending non GOOD SCSI status from TX thread
3021 * context during pre se_cmd excecution failure.
3022 */
3023 ret = isert_put_response(conn, cmd);
3024 break;
3025 default:
24f412dd 3026 isert_err("Unknown response state: 0x%02x\n", state);
b8d26b3b
NB
3027 ret = -EINVAL;
3028 break;
3029 }
3030
3031 return ret;
3032}
3033
ca6c1d82
SG
3034struct rdma_cm_id *
3035isert_setup_id(struct isert_np *isert_np)
3036{
3037 struct iscsi_np *np = isert_np->np;
3038 struct rdma_cm_id *id;
3039 struct sockaddr *sa;
3040 int ret;
3041
3042 sa = (struct sockaddr *)&np->np_sockaddr;
24f412dd 3043 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
ca6c1d82
SG
3044
3045 id = rdma_create_id(isert_cma_handler, isert_np,
3046 RDMA_PS_TCP, IB_QPT_RC);
3047 if (IS_ERR(id)) {
24f412dd 3048 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
ca6c1d82
SG
3049 ret = PTR_ERR(id);
3050 goto out;
3051 }
24f412dd 3052 isert_dbg("id %p context %p\n", id, id->context);
ca6c1d82
SG
3053
3054 ret = rdma_bind_addr(id, sa);
3055 if (ret) {
24f412dd 3056 isert_err("rdma_bind_addr() failed: %d\n", ret);
ca6c1d82
SG
3057 goto out_id;
3058 }
3059
3060 ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
3061 if (ret) {
24f412dd 3062 isert_err("rdma_listen() failed: %d\n", ret);
ca6c1d82
SG
3063 goto out_id;
3064 }
3065
3066 return id;
3067out_id:
3068 rdma_destroy_id(id);
3069out:
3070 return ERR_PTR(ret);
3071}
3072
/*
 * isert_setup_np() - iscsit_transport ->iscsit_setup_np callback.
 * @np:        generic iscsi network portal
 * @ksockaddr: address to listen on, provided by configfs
 *
 * Allocates the isert-private portal context, initializes its accept
 * machinery (semaphore, mutex, list, completion) and starts an RDMA CM
 * listener via isert_setup_id().  Returns 0 on success, negative errno
 * on failure.
 */
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	/* np_sem counts pending connect requests; starts with none */
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
3115
b8d26b3b
NB
3116static int
3117isert_rdma_accept(struct isert_conn *isert_conn)
3118{
3119 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3120 struct rdma_conn_param cp;
3121 int ret;
3122
3123 memset(&cp, 0, sizeof(struct rdma_conn_param));
b8d26b3b
NB
3124 cp.initiator_depth = isert_conn->initiator_depth;
3125 cp.retry_count = 7;
3126 cp.rnr_retry_count = 7;
3127
b8d26b3b
NB
3128 ret = rdma_accept(cm_id, &cp);
3129 if (ret) {
24f412dd 3130 isert_err("rdma_accept() failed with: %d\n", ret);
b8d26b3b
NB
3131 return ret;
3132 }
3133
b8d26b3b
NB
3134 return 0;
3135}
3136
/*
 * isert_get_login_rx() - iscsit_transport ->iscsit_get_login_rx callback.
 * @conn:  iscsi connection in login phase
 * @login: login state
 *
 * Blocks until a login request PDU has been received (login_req_comp is
 * completed from the RX path), then for the first request also waits for
 * conn_login_comp before letting the login thread process login->req.
 * Returns 0 on success, or the interrupted wait's error code.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	/* re-arm for the next login request PDU */
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
3172
3173static void
3174isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3175 struct isert_conn *isert_conn)
3176{
3177 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3178 struct rdma_route *cm_route = &cm_id->route;
3179 struct sockaddr_in *sock_in;
3180 struct sockaddr_in6 *sock_in6;
3181
3182 conn->login_family = np->np_sockaddr.ss_family;
3183
3184 if (np->np_sockaddr.ss_family == AF_INET6) {
3185 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3186 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3187 &sock_in6->sin6_addr.in6_u);
3188 conn->login_port = ntohs(sock_in6->sin6_port);
3189
3190 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3191 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3192 &sock_in6->sin6_addr.in6_u);
3193 conn->local_port = ntohs(sock_in6->sin6_port);
3194 } else {
3195 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3196 sprintf(conn->login_ip, "%pI4",
3197 &sock_in->sin_addr.s_addr);
3198 conn->login_port = ntohs(sock_in->sin_port);
3199
3200 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3201 sprintf(conn->local_ip, "%pI4",
3202 &sock_in->sin_addr.s_addr);
3203 conn->local_port = ntohs(sock_in->sin_port);
3204 }
3205}
3206
/*
 * isert_accept_np() - iscsit_transport ->iscsit_accept_np callback.
 * @np:   iscsi network portal
 * @conn: new iscsi connection to attach an isert connection to
 *
 * Sleeps on np_sem until an RDMA connect event queues an isert_conn on
 * np_accept_list, then binds the first pending connection to @conn and
 * fills in the address info.  Returns 0 on success, -ENODEV when the
 * wait is interrupted or the np thread is being reset/shut down.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	/* up()'ed by the connect-request handler for each pending conn */
	ret = down_interruptible(&isert_np->np_sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			 np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		/* spurious wakeup or raced cleanup: go back to sleep */
		mutex_unlock(&isert_np->np_accept_mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
3251
/*
 * isert_free_np() - iscsit_transport ->iscsit_free_np callback.
 *
 * Tears down the RDMA CM listener and releases any connections that
 * completed RDMA establishment but never reached iscsi login, then frees
 * the isert-private portal context.
 */
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	/* stop new connect events before draining the accept list */
	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to insure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up what ever piled
	 * up in np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		isert_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 conn_accept_node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
3284
b02efbfc
SG
3285static void isert_release_work(struct work_struct *work)
3286{
3287 struct isert_conn *isert_conn = container_of(work,
3288 struct isert_conn,
3289 release_work);
3290
24f412dd 3291 isert_info("Starting release conn %p\n", isert_conn);
b02efbfc
SG
3292
3293 wait_for_completion(&isert_conn->conn_wait);
3294
3295 mutex_lock(&isert_conn->conn_mutex);
3296 isert_conn->state = ISER_CONN_DOWN;
3297 mutex_unlock(&isert_conn->conn_mutex);
3298
24f412dd 3299 isert_info("Destroying conn %p\n", isert_conn);
b02efbfc
SG
3300 isert_put_conn(isert_conn);
3301}
3302
991bb764
SG
3303static void
3304isert_wait4logout(struct isert_conn *isert_conn)
3305{
3306 struct iscsi_conn *conn = isert_conn->conn;
3307
4c22e07f
SG
3308 isert_info("conn %p\n", isert_conn);
3309
991bb764 3310 if (isert_conn->logout_posted) {
24f412dd 3311 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
991bb764
SG
3312 wait_for_completion_timeout(&conn->conn_logout_comp,
3313 SECONDS_FOR_LOGOUT_COMP * HZ);
3314 }
3315}
3316
c7e160ee
SG
3317static void
3318isert_wait4cmds(struct iscsi_conn *conn)
3319{
4c22e07f
SG
3320 isert_info("iscsi_conn %p\n", conn);
3321
c7e160ee
SG
3322 if (conn->sess) {
3323 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
3324 target_wait_for_sess_cmds(conn->sess->se_sess);
3325 }
3326}
3327
/*
 * isert_wait4flush() - wait until all flush errors on the QP are consumed.
 *
 * Posts a "beacon" recv WR with a sentinel wr_id; because the QP is in
 * error state it completes with flush status *after* every previously
 * posted WR has flushed, at which point the completion handler signals
 * conn_wait_comp_err.  If the beacon cannot be posted we cannot safely
 * wait, so bail out.
 */
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->conn_wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->conn_wait_comp_err);
}
3345
/*
 * isert_wait_conn() - iscsit_transport ->iscsit_wait_conn callback.
 *
 * Terminates the RDMA connection (unless it never left INIT state), then
 * waits in order for: outstanding se_cmds, flush-error completions, and a
 * posted logout response.  Final destruction is deferred to
 * isert_release_work on isert_release_wq.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	isert_wait4cmds(conn);
	isert_wait4flush(isert_conn);
	isert_wait4logout(isert_conn);

	/* hand the final put off to the release workqueue */
	INIT_WORK(&isert_conn->release_work, isert_release_work);
	queue_work(isert_release_wq, &isert_conn->release_work);
}
3371
3372static void isert_free_conn(struct iscsi_conn *conn)
3373{
3374 struct isert_conn *isert_conn = conn->context;
b8d26b3b
NB
3375
3376 isert_put_conn(isert_conn);
3377}
3378
/*
 * iscsit_transport ops wiring the iSER/RDMA implementation into the
 * generic iSCSI target core.  priv_size reserves per-command space for
 * struct isert_cmd alongside each iscsi_cmd.
 */
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
3399
3400static int __init isert_init(void)
3401{
3402 int ret;
3403
631af550
SG
3404 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3405 WQ_UNBOUND | WQ_HIGHPRI, 0);
b8d26b3b 3406 if (!isert_comp_wq) {
24f412dd 3407 isert_err("Unable to allocate isert_comp_wq\n");
b8d26b3b 3408 ret = -ENOMEM;
6f0fae3d 3409 return -ENOMEM;
b8d26b3b
NB
3410 }
3411
b02efbfc
SG
3412 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3413 WQ_UNBOUND_MAX_ACTIVE);
3414 if (!isert_release_wq) {
24f412dd 3415 isert_err("Unable to allocate isert_release_wq\n");
b02efbfc
SG
3416 ret = -ENOMEM;
3417 goto destroy_comp_wq;
3418 }
3419
b8d26b3b 3420 iscsit_register_transport(&iser_target_transport);
24f412dd 3421 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
b02efbfc 3422
b8d26b3b
NB
3423 return 0;
3424
b02efbfc
SG
3425destroy_comp_wq:
3426 destroy_workqueue(isert_comp_wq);
6f0fae3d 3427
b8d26b3b
NB
3428 return ret;
3429}
3430
/*
 * isert_exit() - module exit: flush pending work, destroy workqueues and
 * unregister the transport.  Release work is flushed/destroyed before the
 * completion workqueue since release handlers may still be in flight.
 */
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
3439
3440MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3441MODULE_VERSION("0.1");
3442MODULE_AUTHOR("nab@Linux-iSCSI.org");
3443MODULE_LICENSE("GPL");
3444
3445module_init(isert_init);
3446module_exit(isert_exit);