]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/infiniband/ulp/srp/ib_srp.c
Merge branch 'kconfig' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
[mirror_ubuntu-artful-kernel.git] / drivers / infiniband / ulp / srp / ib_srp.c
CommitLineData
aef9ec39
RD
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
aef9ec39
RD
31 */
32
d236cd0e 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
e0bda7d8 34
aef9ec39
RD
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
de25968c 42#include <linux/jiffies.h>
aef9ec39 43
60063497 44#include <linux/atomic.h>
aef9ec39
RD
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_dbg.h>
71444b97 49#include <scsi/scsi_tcq.h>
aef9ec39 50#include <scsi/srp.h>
3236822b 51#include <scsi/scsi_transport_srp.h>
aef9ec39 52
aef9ec39
RD
53#include "ib_srp.h"
54
55#define DRV_NAME "ib_srp"
56#define PFX DRV_NAME ": "
e8ca4135
VP
57#define DRV_VERSION "1.0"
58#define DRV_RELDATE "July 1, 2013"
aef9ec39
RD
59
60MODULE_AUTHOR("Roland Dreier");
61MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62 "v" DRV_VERSION " (" DRV_RELDATE ")");
63MODULE_LICENSE("Dual BSD/GPL");
64
49248644
DD
65static unsigned int srp_sg_tablesize;
66static unsigned int cmd_sg_entries;
c07d424d
DD
67static unsigned int indirect_sg_entries;
68static bool allow_ext_sg;
5cfb1782 69static bool prefer_fr;
b1b8854d 70static bool register_always;
49248644 71static int topspin_workarounds = 1;
74b0a15b 72
49248644
DD
73module_param(srp_sg_tablesize, uint, 0444);
74MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
74b0a15b 75
49248644
DD
76module_param(cmd_sg_entries, uint, 0444);
77MODULE_PARM_DESC(cmd_sg_entries,
78 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
aef9ec39 79
c07d424d
DD
80module_param(indirect_sg_entries, uint, 0444);
81MODULE_PARM_DESC(indirect_sg_entries,
82 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
83
84module_param(allow_ext_sg, bool, 0444);
85MODULE_PARM_DESC(allow_ext_sg,
86 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
87
aef9ec39
RD
88module_param(topspin_workarounds, int, 0444);
89MODULE_PARM_DESC(topspin_workarounds,
90 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
91
5cfb1782
BVA
92module_param(prefer_fr, bool, 0444);
93MODULE_PARM_DESC(prefer_fr,
94"Whether to use fast registration if both FMR and fast registration are supported");
95
b1b8854d
BVA
96module_param(register_always, bool, 0444);
97MODULE_PARM_DESC(register_always,
98 "Use memory registration even for contiguous memory regions");
99
ed9b2264
BVA
100static struct kernel_param_ops srp_tmo_ops;
101
a95cadb9
BVA
102static int srp_reconnect_delay = 10;
103module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
104 S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
106
ed9b2264
BVA
107static int srp_fast_io_fail_tmo = 15;
108module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
109 S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(fast_io_fail_tmo,
111 "Number of seconds between the observation of a transport"
112 " layer error and failing all I/O. \"off\" means that this"
113 " functionality is disabled.");
114
a95cadb9 115static int srp_dev_loss_tmo = 600;
ed9b2264
BVA
116module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
117 S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(dev_loss_tmo,
119 "Maximum number of seconds that the SRP transport should"
120 " insulate transport layer errors. After this time has been"
121 " exceeded the SCSI host is removed. Should be"
122 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
123 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 " this functionality is disabled.");
125
d92c0da7
BVA
126static unsigned ch_count;
127module_param(ch_count, uint, 0444);
128MODULE_PARM_DESC(ch_count,
129 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
130
aef9ec39
RD
131static void srp_add_one(struct ib_device *device);
132static void srp_remove_one(struct ib_device *device);
509c07bc
BVA
133static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
134static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
aef9ec39
RD
135static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
136
3236822b 137static struct scsi_transport_template *ib_srp_transport_template;
bcc05910 138static struct workqueue_struct *srp_remove_wq;
3236822b 139
aef9ec39
RD
140static struct ib_client srp_client = {
141 .name = "srp",
142 .add = srp_add_one,
143 .remove = srp_remove_one
144};
145
c1a0b23b
MT
146static struct ib_sa_client srp_sa_client;
147
ed9b2264
BVA
148static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
149{
150 int tmo = *(int *)kp->arg;
151
152 if (tmo >= 0)
153 return sprintf(buffer, "%d", tmo);
154 else
155 return sprintf(buffer, "off");
156}
157
158static int srp_tmo_set(const char *val, const struct kernel_param *kp)
159{
160 int tmo, res;
161
162 if (strncmp(val, "off", 3) != 0) {
163 res = kstrtoint(val, 0, &tmo);
164 if (res)
165 goto out;
166 } else {
167 tmo = -1;
168 }
a95cadb9
BVA
169 if (kp->arg == &srp_reconnect_delay)
170 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
171 srp_dev_loss_tmo);
172 else if (kp->arg == &srp_fast_io_fail_tmo)
173 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
ed9b2264 174 else
a95cadb9
BVA
175 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
176 tmo);
ed9b2264
BVA
177 if (res)
178 goto out;
179 *(int *)kp->arg = tmo;
180
181out:
182 return res;
183}
184
185static struct kernel_param_ops srp_tmo_ops = {
186 .get = srp_tmo_get,
187 .set = srp_tmo_set,
188};
189
aef9ec39
RD
190static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191{
192 return (struct srp_target_port *) host->hostdata;
193}
194
195static const char *srp_target_info(struct Scsi_Host *host)
196{
197 return host_to_target(host)->target_name;
198}
199
5d7cbfd6
RD
200static int srp_target_is_topspin(struct srp_target_port *target)
201{
202 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 203 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
204
205 return topspin_workarounds &&
3d1ff48d
RK
206 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
207 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
208}
209
aef9ec39
RD
210static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
211 gfp_t gfp_mask,
212 enum dma_data_direction direction)
213{
214 struct srp_iu *iu;
215
216 iu = kmalloc(sizeof *iu, gfp_mask);
217 if (!iu)
218 goto out;
219
220 iu->buf = kzalloc(size, gfp_mask);
221 if (!iu->buf)
222 goto out_free_iu;
223
05321937
GKH
224 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
225 direction);
226 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
227 goto out_free_buf;
228
229 iu->size = size;
230 iu->direction = direction;
231
232 return iu;
233
234out_free_buf:
235 kfree(iu->buf);
236out_free_iu:
237 kfree(iu);
238out:
239 return NULL;
240}
241
242static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243{
244 if (!iu)
245 return;
246
05321937
GKH
247 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
248 iu->direction);
aef9ec39
RD
249 kfree(iu->buf);
250 kfree(iu);
251}
252
253static void srp_qp_event(struct ib_event *event, void *context)
254{
e0bda7d8 255 pr_debug("QP event %d\n", event->event);
aef9ec39
RD
256}
257
258static int srp_init_qp(struct srp_target_port *target,
259 struct ib_qp *qp)
260{
261 struct ib_qp_attr *attr;
262 int ret;
263
264 attr = kmalloc(sizeof *attr, GFP_KERNEL);
265 if (!attr)
266 return -ENOMEM;
267
969a60f9
RD
268 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
269 target->srp_host->port,
747fe000 270 be16_to_cpu(target->pkey),
969a60f9 271 &attr->pkey_index);
aef9ec39
RD
272 if (ret)
273 goto out;
274
275 attr->qp_state = IB_QPS_INIT;
276 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
277 IB_ACCESS_REMOTE_WRITE);
278 attr->port_num = target->srp_host->port;
279
280 ret = ib_modify_qp(qp, attr,
281 IB_QP_STATE |
282 IB_QP_PKEY_INDEX |
283 IB_QP_ACCESS_FLAGS |
284 IB_QP_PORT);
285
286out:
287 kfree(attr);
288 return ret;
289}
290
509c07bc 291static int srp_new_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 292{
509c07bc 293 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
294 struct ib_cm_id *new_cm_id;
295
05321937 296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
509c07bc 297 srp_cm_handler, ch);
9fe4bcf4
DD
298 if (IS_ERR(new_cm_id))
299 return PTR_ERR(new_cm_id);
300
509c07bc
BVA
301 if (ch->cm_id)
302 ib_destroy_cm_id(ch->cm_id);
303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
9fe4bcf4
DD
308
309 return 0;
310}
311
d1b4289e
BVA
312static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
313{
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
316
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
320 fmr_param.cache = 1;
52ede08f
BVA
321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
326
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
328}
329
5cfb1782
BVA
330/**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335{
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
343 if (d->frpl)
344 ib_free_fast_reg_page_list(d->frpl);
345 if (d->mr)
346 ib_dereg_mr(d->mr);
347 }
348 kfree(pool);
349}
350
351/**
352 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
353 * @device: IB device to allocate fast registration descriptors for.
354 * @pd: Protection domain associated with the FR descriptors.
355 * @pool_size: Number of descriptors to allocate.
356 * @max_page_list_len: Maximum fast registration work request page list length.
357 */
358static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
359 struct ib_pd *pd, int pool_size,
360 int max_page_list_len)
361{
362 struct srp_fr_pool *pool;
363 struct srp_fr_desc *d;
364 struct ib_mr *mr;
365 struct ib_fast_reg_page_list *frpl;
366 int i, ret = -EINVAL;
367
368 if (pool_size <= 0)
369 goto err;
370 ret = -ENOMEM;
371 pool = kzalloc(sizeof(struct srp_fr_pool) +
372 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
373 if (!pool)
374 goto err;
375 pool->size = pool_size;
376 pool->max_page_list_len = max_page_list_len;
377 spin_lock_init(&pool->lock);
378 INIT_LIST_HEAD(&pool->free_list);
379
380 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
381 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
382 if (IS_ERR(mr)) {
383 ret = PTR_ERR(mr);
384 goto destroy_pool;
385 }
386 d->mr = mr;
387 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
388 if (IS_ERR(frpl)) {
389 ret = PTR_ERR(frpl);
390 goto destroy_pool;
391 }
392 d->frpl = frpl;
393 list_add_tail(&d->entry, &pool->free_list);
394 }
395
396out:
397 return pool;
398
399destroy_pool:
400 srp_destroy_fr_pool(pool);
401
402err:
403 pool = ERR_PTR(ret);
404 goto out;
405}
406
407/**
408 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
409 * @pool: Pool to obtain descriptor from.
410 */
411static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
412{
413 struct srp_fr_desc *d = NULL;
414 unsigned long flags;
415
416 spin_lock_irqsave(&pool->lock, flags);
417 if (!list_empty(&pool->free_list)) {
418 d = list_first_entry(&pool->free_list, typeof(*d), entry);
419 list_del(&d->entry);
420 }
421 spin_unlock_irqrestore(&pool->lock, flags);
422
423 return d;
424}
425
426/**
427 * srp_fr_pool_put() - put an FR descriptor back in the free list
428 * @pool: Pool the descriptor was allocated from.
429 * @desc: Pointer to an array of fast registration descriptor pointers.
430 * @n: Number of descriptors to put back.
431 *
432 * Note: The caller must already have queued an invalidation request for
433 * desc->mr->rkey before calling this function.
434 */
435static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
436 int n)
437{
438 unsigned long flags;
439 int i;
440
441 spin_lock_irqsave(&pool->lock, flags);
442 for (i = 0; i < n; i++)
443 list_add(&desc[i]->entry, &pool->free_list);
444 spin_unlock_irqrestore(&pool->lock, flags);
445}
446
447static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
448{
449 struct srp_device *dev = target->srp_host->srp_dev;
450
451 return srp_create_fr_pool(dev->dev, dev->pd,
452 target->scsi_host->can_queue,
453 dev->max_pages_per_mr);
454}
455
7dad6b2e
BVA
456/**
457 * srp_destroy_qp() - destroy an RDMA queue pair
458 * @ch: SRP RDMA channel.
459 *
460 * Change a queue pair into the error state and wait until all receive
461 * completions have been processed before destroying it. This avoids that
462 * the receive completion handler can access the queue pair while it is
463 * being destroyed.
464 */
465static void srp_destroy_qp(struct srp_rdma_ch *ch)
466{
467 struct srp_target_port *target = ch->target;
468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
471 int ret;
472
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
474 WARN_ON_ONCE(target->connected);
475
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
477 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
478 if (ret)
479 goto out;
480
481 init_completion(&ch->done);
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
483 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
484 if (ret == 0)
485 wait_for_completion(&ch->done);
486
487out:
488 ib_destroy_qp(ch->qp);
489}
490
509c07bc 491static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 492{
509c07bc 493 struct srp_target_port *target = ch->target;
62154b2e 494 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 495 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
496 struct ib_cq *recv_cq, *send_cq;
497 struct ib_qp *qp;
d1b4289e 498 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782
BVA
499 struct srp_fr_pool *fr_pool = NULL;
500 const int m = 1 + dev->use_fast_reg;
aef9ec39
RD
501 int ret;
502
503 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
504 if (!init_attr)
505 return -ENOMEM;
506
7dad6b2e 507 /* + 1 for SRP_LAST_WR_ID */
509c07bc 508 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
7dad6b2e 509 target->queue_size + 1, ch->comp_vector);
73aa89ed
IR
510 if (IS_ERR(recv_cq)) {
511 ret = PTR_ERR(recv_cq);
da9d2f07 512 goto err;
aef9ec39
RD
513 }
514
509c07bc
BVA
515 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
516 m * target->queue_size, ch->comp_vector);
73aa89ed
IR
517 if (IS_ERR(send_cq)) {
518 ret = PTR_ERR(send_cq);
da9d2f07 519 goto err_recv_cq;
9c03dc9f
BVA
520 }
521
73aa89ed 522 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
523
524 init_attr->event_handler = srp_qp_event;
5cfb1782 525 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 526 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
527 init_attr->cap.max_recv_sge = 1;
528 init_attr->cap.max_send_sge = 1;
5cfb1782 529 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 530 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
531 init_attr->send_cq = send_cq;
532 init_attr->recv_cq = recv_cq;
aef9ec39 533
62154b2e 534 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
535 if (IS_ERR(qp)) {
536 ret = PTR_ERR(qp);
da9d2f07 537 goto err_send_cq;
aef9ec39
RD
538 }
539
73aa89ed 540 ret = srp_init_qp(target, qp);
da9d2f07
RD
541 if (ret)
542 goto err_qp;
aef9ec39 543
5cfb1782
BVA
544 if (dev->use_fast_reg && dev->has_fr) {
545 fr_pool = srp_alloc_fr_pool(target);
546 if (IS_ERR(fr_pool)) {
547 ret = PTR_ERR(fr_pool);
548 shost_printk(KERN_WARNING, target->scsi_host, PFX
549 "FR pool allocation failed (%d)\n", ret);
550 goto err_qp;
551 }
509c07bc
BVA
552 if (ch->fr_pool)
553 srp_destroy_fr_pool(ch->fr_pool);
554 ch->fr_pool = fr_pool;
5cfb1782 555 } else if (!dev->use_fast_reg && dev->has_fmr) {
d1b4289e
BVA
556 fmr_pool = srp_alloc_fmr_pool(target);
557 if (IS_ERR(fmr_pool)) {
558 ret = PTR_ERR(fmr_pool);
559 shost_printk(KERN_WARNING, target->scsi_host, PFX
560 "FMR pool allocation failed (%d)\n", ret);
561 goto err_qp;
562 }
509c07bc
BVA
563 if (ch->fmr_pool)
564 ib_destroy_fmr_pool(ch->fmr_pool);
565 ch->fmr_pool = fmr_pool;
d1b4289e
BVA
566 }
567
509c07bc 568 if (ch->qp)
7dad6b2e 569 srp_destroy_qp(ch);
509c07bc
BVA
570 if (ch->recv_cq)
571 ib_destroy_cq(ch->recv_cq);
572 if (ch->send_cq)
573 ib_destroy_cq(ch->send_cq);
73aa89ed 574
509c07bc
BVA
575 ch->qp = qp;
576 ch->recv_cq = recv_cq;
577 ch->send_cq = send_cq;
73aa89ed 578
da9d2f07
RD
579 kfree(init_attr);
580 return 0;
581
582err_qp:
73aa89ed 583 ib_destroy_qp(qp);
da9d2f07
RD
584
585err_send_cq:
73aa89ed 586 ib_destroy_cq(send_cq);
da9d2f07
RD
587
588err_recv_cq:
73aa89ed 589 ib_destroy_cq(recv_cq);
da9d2f07
RD
590
591err:
aef9ec39
RD
592 kfree(init_attr);
593 return ret;
594}
595
4d73f95f
BVA
596/*
597 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 598 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 599 */
509c07bc
BVA
600static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
aef9ec39 602{
5cfb1782 603 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
604 int i;
605
d92c0da7
BVA
606 if (!ch->target)
607 return;
608
509c07bc
BVA
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
394c595e
BVA
612 }
613
d92c0da7
BVA
614 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
615 if (!ch->qp)
616 return;
617
5cfb1782 618 if (dev->use_fast_reg) {
509c07bc
BVA
619 if (ch->fr_pool)
620 srp_destroy_fr_pool(ch->fr_pool);
5cfb1782 621 } else {
509c07bc
BVA
622 if (ch->fmr_pool)
623 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 624 }
7dad6b2e 625 srp_destroy_qp(ch);
509c07bc
BVA
626 ib_destroy_cq(ch->send_cq);
627 ib_destroy_cq(ch->recv_cq);
aef9ec39 628
d92c0da7
BVA
629 /*
630 * Avoid that the SCSI error handler tries to use this channel after
631 * it has been freed. The SCSI error handler can namely continue
632 * trying to perform recovery actions after scsi_remove_host()
633 * returned.
634 */
635 ch->target = NULL;
636
509c07bc
BVA
637 ch->qp = NULL;
638 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 639
509c07bc 640 if (ch->rx_ring) {
4d73f95f 641 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
643 kfree(ch->rx_ring);
644 ch->rx_ring = NULL;
4d73f95f 645 }
509c07bc 646 if (ch->tx_ring) {
4d73f95f 647 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
649 kfree(ch->tx_ring);
650 ch->tx_ring = NULL;
4d73f95f 651 }
aef9ec39
RD
652}
653
654static void srp_path_rec_completion(int status,
655 struct ib_sa_path_rec *pathrec,
509c07bc 656 void *ch_ptr)
aef9ec39 657{
509c07bc
BVA
658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
aef9ec39 660
509c07bc 661 ch->status = status;
aef9ec39 662 if (status)
7aa54bd7
DD
663 shost_printk(KERN_ERR, target->scsi_host,
664 PFX "Got failed path rec status %d\n", status);
aef9ec39 665 else
509c07bc
BVA
666 ch->path = *pathrec;
667 complete(&ch->done);
aef9ec39
RD
668}
669
509c07bc 670static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 671{
509c07bc 672 struct srp_target_port *target = ch->target;
a702adce
BVA
673 int ret;
674
509c07bc
BVA
675 ch->path.numb_path = 1;
676
677 init_completion(&ch->done);
678
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
680 target->srp_host->srp_dev->dev,
681 target->srp_host->port,
682 &ch->path,
683 IB_SA_PATH_REC_SERVICE_ID |
684 IB_SA_PATH_REC_DGID |
685 IB_SA_PATH_REC_SGID |
686 IB_SA_PATH_REC_NUMB_PATH |
687 IB_SA_PATH_REC_PKEY,
688 SRP_PATH_REC_TIMEOUT_MS,
689 GFP_KERNEL,
690 srp_path_rec_completion,
691 ch, &ch->path_query);
692 if (ch->path_query_id < 0)
693 return ch->path_query_id;
694
695 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
696 if (ret < 0)
697 return ret;
aef9ec39 698
509c07bc 699 if (ch->status < 0)
7aa54bd7
DD
700 shost_printk(KERN_WARNING, target->scsi_host,
701 PFX "Path record query failed\n");
aef9ec39 702
509c07bc 703 return ch->status;
aef9ec39
RD
704}
705
d92c0da7 706static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 707{
509c07bc 708 struct srp_target_port *target = ch->target;
aef9ec39
RD
709 struct {
710 struct ib_cm_req_param param;
711 struct srp_login_req priv;
712 } *req = NULL;
713 int status;
714
715 req = kzalloc(sizeof *req, GFP_KERNEL);
716 if (!req)
717 return -ENOMEM;
718
509c07bc 719 req->param.primary_path = &ch->path;
aef9ec39
RD
720 req->param.alternate_path = NULL;
721 req->param.service_id = target->service_id;
509c07bc
BVA
722 req->param.qp_num = ch->qp->qp_num;
723 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
724 req->param.private_data = &req->priv;
725 req->param.private_data_len = sizeof req->priv;
726 req->param.flow_control = 1;
727
728 get_random_bytes(&req->param.starting_psn, 4);
729 req->param.starting_psn &= 0xffffff;
730
731 /*
732 * Pick some arbitrary defaults here; we could make these
733 * module parameters if anyone cared about setting them.
734 */
735 req->param.responder_resources = 4;
736 req->param.remote_cm_response_timeout = 20;
737 req->param.local_cm_response_timeout = 20;
7bb312e4 738 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
739 req->param.rnr_retry_count = 7;
740 req->param.max_cm_retries = 15;
741
742 req->priv.opcode = SRP_LOGIN_REQ;
743 req->priv.tag = 0;
49248644 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
746 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
0c0450db 749 /*
3cd96564 750 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
751 * port identifier format is 8 bytes of ID extension followed
752 * by 8 bytes of GUID. Older drafts put the two halves in the
753 * opposite order, so that the GUID comes first.
754 *
755 * Targets conforming to these obsolete drafts can be
756 * recognized by the I/O Class they report.
757 */
758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
759 memcpy(req->priv.initiator_port_id,
747fe000 760 &target->sgid.global.interface_id, 8);
0c0450db 761 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 762 &target->initiator_ext, 8);
0c0450db
R
763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
764 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
765 } else {
766 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
767 &target->initiator_ext, 8);
768 memcpy(req->priv.initiator_port_id + 8,
747fe000 769 &target->sgid.global.interface_id, 8);
0c0450db
R
770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
772 }
773
aef9ec39
RD
774 /*
775 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
776 * zero out the first 8 bytes of our initiator port ID and set
777 * the second 8 bytes to the local node GUID.
aef9ec39 778 */
5d7cbfd6 779 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
780 shost_printk(KERN_DEBUG, target->scsi_host,
781 PFX "Topspin/Cisco initiator port ID workaround "
782 "activated for target GUID %016llx\n",
783 (unsigned long long) be64_to_cpu(target->ioc_guid));
aef9ec39 784 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 785 memcpy(req->priv.initiator_port_id + 8,
05321937 786 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 787 }
aef9ec39 788
509c07bc 789 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
790
791 kfree(req);
792
793 return status;
794}
795
ef6c49d8
BVA
796static bool srp_queue_remove_work(struct srp_target_port *target)
797{
798 bool changed = false;
799
800 spin_lock_irq(&target->lock);
801 if (target->state != SRP_TARGET_REMOVED) {
802 target->state = SRP_TARGET_REMOVED;
803 changed = true;
804 }
805 spin_unlock_irq(&target->lock);
806
807 if (changed)
bcc05910 808 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
809
810 return changed;
811}
812
294c875a
BVA
813static bool srp_change_conn_state(struct srp_target_port *target,
814 bool connected)
815{
816 bool changed = false;
817
818 spin_lock_irq(&target->lock);
819 if (target->connected != connected) {
820 target->connected = connected;
821 changed = true;
822 }
823 spin_unlock_irq(&target->lock);
824
825 return changed;
826}
827
aef9ec39
RD
828static void srp_disconnect_target(struct srp_target_port *target)
829{
d92c0da7
BVA
830 struct srp_rdma_ch *ch;
831 int i;
509c07bc 832
294c875a
BVA
833 if (srp_change_conn_state(target, false)) {
834 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 835
d92c0da7
BVA
836 for (i = 0; i < target->ch_count; i++) {
837 ch = &target->ch[i];
838 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
839 shost_printk(KERN_DEBUG, target->scsi_host,
840 PFX "Sending CM DREQ failed\n");
841 }
294c875a 842 }
e6581056 843 }
aef9ec39
RD
844}
845
509c07bc
BVA
846static void srp_free_req_data(struct srp_target_port *target,
847 struct srp_rdma_ch *ch)
8f26c9ff 848{
5cfb1782
BVA
849 struct srp_device *dev = target->srp_host->srp_dev;
850 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
851 struct srp_request *req;
852 int i;
853
d92c0da7 854 if (!ch->target || !ch->req_ring)
4d73f95f
BVA
855 return;
856
857 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 858 req = &ch->req_ring[i];
5cfb1782
BVA
859 if (dev->use_fast_reg)
860 kfree(req->fr_list);
861 else
862 kfree(req->fmr_list);
8f26c9ff 863 kfree(req->map_page);
c07d424d
DD
864 if (req->indirect_dma_addr) {
865 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
866 target->indirect_size,
867 DMA_TO_DEVICE);
868 }
869 kfree(req->indirect_desc);
8f26c9ff 870 }
4d73f95f 871
509c07bc
BVA
872 kfree(ch->req_ring);
873 ch->req_ring = NULL;
8f26c9ff
DD
874}
875
509c07bc 876static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 877{
509c07bc 878 struct srp_target_port *target = ch->target;
b81d00bd
BVA
879 struct srp_device *srp_dev = target->srp_host->srp_dev;
880 struct ib_device *ibdev = srp_dev->dev;
881 struct srp_request *req;
5cfb1782 882 void *mr_list;
b81d00bd
BVA
883 dma_addr_t dma_addr;
884 int i, ret = -ENOMEM;
885
509c07bc
BVA
886 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
887 GFP_KERNEL);
888 if (!ch->req_ring)
4d73f95f
BVA
889 goto out;
890
891 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 892 req = &ch->req_ring[i];
5cfb1782
BVA
893 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
894 GFP_KERNEL);
895 if (!mr_list)
896 goto out;
897 if (srp_dev->use_fast_reg)
898 req->fr_list = mr_list;
899 else
900 req->fmr_list = mr_list;
52ede08f 901 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
d1b4289e 902 sizeof(void *), GFP_KERNEL);
5cfb1782
BVA
903 if (!req->map_page)
904 goto out;
b81d00bd 905 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 906 if (!req->indirect_desc)
b81d00bd
BVA
907 goto out;
908
909 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
910 target->indirect_size,
911 DMA_TO_DEVICE);
912 if (ib_dma_mapping_error(ibdev, dma_addr))
913 goto out;
914
915 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
916 }
917 ret = 0;
918
919out:
920 return ret;
921}
922
683b159a
BVA
923/**
924 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
925 * @shost: SCSI host whose attributes to remove from sysfs.
926 *
927 * Note: Any attributes defined in the host template and that did not exist
928 * before invocation of this function will be ignored.
929 */
930static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
931{
932 struct device_attribute **attr;
933
934 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
935 device_remove_file(&shost->shost_dev, *attr);
936}
937
ee12d6a8
BVA
938static void srp_remove_target(struct srp_target_port *target)
939{
d92c0da7
BVA
940 struct srp_rdma_ch *ch;
941 int i;
509c07bc 942
ef6c49d8
BVA
943 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
944
ee12d6a8 945 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 946 srp_rport_get(target->rport);
ee12d6a8
BVA
947 srp_remove_host(target->scsi_host);
948 scsi_remove_host(target->scsi_host);
93079162 949 srp_stop_rport_timers(target->rport);
ef6c49d8 950 srp_disconnect_target(target);
d92c0da7
BVA
951 for (i = 0; i < target->ch_count; i++) {
952 ch = &target->ch[i];
953 srp_free_ch_ib(target, ch);
954 }
c1120f89 955 cancel_work_sync(&target->tl_err_work);
9dd69a60 956 srp_rport_put(target->rport);
d92c0da7
BVA
957 for (i = 0; i < target->ch_count; i++) {
958 ch = &target->ch[i];
959 srp_free_req_data(target, ch);
960 }
961 kfree(target->ch);
962 target->ch = NULL;
65d7dd2f
VP
963
964 spin_lock(&target->srp_host->target_lock);
965 list_del(&target->list);
966 spin_unlock(&target->srp_host->target_lock);
967
ee12d6a8
BVA
968 scsi_host_put(target->scsi_host);
969}
970
c4028958 971static void srp_remove_work(struct work_struct *work)
aef9ec39 972{
c4028958 973 struct srp_target_port *target =
ef6c49d8 974 container_of(work, struct srp_target_port, remove_work);
aef9ec39 975
ef6c49d8 976 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 977
96fc248a 978 srp_remove_target(target);
aef9ec39
RD
979}
980
dc1bdbd9
BVA
981static void srp_rport_delete(struct srp_rport *rport)
982{
983 struct srp_target_port *target = rport->lld_data;
984
985 srp_queue_remove_work(target);
986}
987
d92c0da7 988static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 989{
509c07bc 990 struct srp_target_port *target = ch->target;
aef9ec39
RD
991 int ret;
992
d92c0da7 993 WARN_ON_ONCE(!multich && target->connected);
294c875a 994
948d1e88
BVA
995 target->qp_in_error = false;
996
509c07bc 997 ret = srp_lookup_path(ch);
aef9ec39
RD
998 if (ret)
999 return ret;
1000
1001 while (1) {
509c07bc 1002 init_completion(&ch->done);
d92c0da7 1003 ret = srp_send_req(ch, multich);
aef9ec39
RD
1004 if (ret)
1005 return ret;
509c07bc 1006 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
1007 if (ret < 0)
1008 return ret;
aef9ec39
RD
1009
1010 /*
1011 * The CM event handling code will set status to
1012 * SRP_PORT_REDIRECT if we get a port redirect REJ
1013 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1014 * redirect REJ back.
1015 */
509c07bc 1016 switch (ch->status) {
aef9ec39 1017 case 0:
294c875a 1018 srp_change_conn_state(target, true);
aef9ec39
RD
1019 return 0;
1020
1021 case SRP_PORT_REDIRECT:
509c07bc 1022 ret = srp_lookup_path(ch);
aef9ec39
RD
1023 if (ret)
1024 return ret;
1025 break;
1026
1027 case SRP_DLID_REDIRECT:
1028 break;
1029
9fe4bcf4 1030 case SRP_STALE_CONN:
9fe4bcf4 1031 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1032 "giving up on stale connection\n");
509c07bc
BVA
1033 ch->status = -ECONNRESET;
1034 return ch->status;
9fe4bcf4 1035
aef9ec39 1036 default:
509c07bc 1037 return ch->status;
aef9ec39
RD
1038 }
1039 }
1040}
1041
509c07bc 1042static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
1043{
1044 struct ib_send_wr *bad_wr;
1045 struct ib_send_wr wr = {
1046 .opcode = IB_WR_LOCAL_INV,
1047 .wr_id = LOCAL_INV_WR_ID_MASK,
1048 .next = NULL,
1049 .num_sge = 0,
1050 .send_flags = 0,
1051 .ex.invalidate_rkey = rkey,
1052 };
1053
509c07bc 1054 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1055}
1056
d945e1df 1057static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1058 struct srp_rdma_ch *ch,
d945e1df
RD
1059 struct srp_request *req)
1060{
509c07bc 1061 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1062 struct srp_device *dev = target->srp_host->srp_dev;
1063 struct ib_device *ibdev = dev->dev;
1064 int i, res;
8f26c9ff 1065
bb350d1d 1066 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1067 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1068 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1069 return;
1070
5cfb1782
BVA
1071 if (dev->use_fast_reg) {
1072 struct srp_fr_desc **pfr;
1073
1074 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1075 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1076 if (res < 0) {
1077 shost_printk(KERN_ERR, target->scsi_host, PFX
1078 "Queueing INV WR for rkey %#x failed (%d)\n",
1079 (*pfr)->mr->rkey, res);
1080 queue_work(system_long_wq,
1081 &target->tl_err_work);
1082 }
1083 }
1084 if (req->nmdesc)
509c07bc 1085 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782
BVA
1086 req->nmdesc);
1087 } else {
1088 struct ib_pool_fmr **pfmr;
1089
1090 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1091 ib_fmr_pool_unmap(*pfmr);
1092 }
f5358a17 1093
8f26c9ff
DD
1094 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1095 scmnd->sc_data_direction);
d945e1df
RD
1096}
1097
22032991
BVA
1098/**
1099 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1100 * @ch: SRP RDMA channel.
22032991 1101 * @req: SRP request.
b3fe628d 1102 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1103 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1104 * ownership of @req->scmnd if it equals @scmnd.
1105 *
1106 * Return value:
1107 * Either NULL or a pointer to the SCSI command the caller became owner of.
1108 */
509c07bc 1109static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1110 struct srp_request *req,
b3fe628d 1111 struct scsi_device *sdev,
22032991
BVA
1112 struct scsi_cmnd *scmnd)
1113{
1114 unsigned long flags;
1115
509c07bc 1116 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1117 if (req->scmnd &&
1118 (!sdev || req->scmnd->device == sdev) &&
1119 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1120 scmnd = req->scmnd;
1121 req->scmnd = NULL;
22032991
BVA
1122 } else {
1123 scmnd = NULL;
1124 }
509c07bc 1125 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1126
1127 return scmnd;
1128}
1129
1130/**
1131 * srp_free_req() - Unmap data and add request to the free request list.
509c07bc 1132 * @ch: SRP RDMA channel.
af24663b
BVA
1133 * @req: Request to be freed.
1134 * @scmnd: SCSI command associated with @req.
1135 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1136 */
509c07bc
BVA
1137static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1138 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1139{
94a9174c
BVA
1140 unsigned long flags;
1141
509c07bc 1142 srp_unmap_data(scmnd, ch, req);
22032991 1143
509c07bc
BVA
1144 spin_lock_irqsave(&ch->lock, flags);
1145 ch->req_lim += req_lim_delta;
509c07bc 1146 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1147}
1148
509c07bc
BVA
1149static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1150 struct scsi_device *sdev, int result)
526b4caa 1151{
509c07bc 1152 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1153
1154 if (scmnd) {
509c07bc 1155 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1156 scmnd->result = result;
22032991 1157 scmnd->scsi_done(scmnd);
22032991 1158 }
526b4caa
IR
1159}
1160
ed9b2264 1161static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1162{
ed9b2264 1163 struct srp_target_port *target = rport->lld_data;
d92c0da7 1164 struct srp_rdma_ch *ch;
b3fe628d
BVA
1165 struct Scsi_Host *shost = target->scsi_host;
1166 struct scsi_device *sdev;
d92c0da7 1167 int i, j;
ed9b2264 1168
b3fe628d
BVA
1169 /*
1170 * Invoking srp_terminate_io() while srp_queuecommand() is running
1171 * is not safe. Hence the warning statement below.
1172 */
1173 shost_for_each_device(sdev, shost)
1174 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1175
d92c0da7
BVA
1176 for (i = 0; i < target->ch_count; i++) {
1177 ch = &target->ch[i];
509c07bc 1178
d92c0da7
BVA
1179 for (j = 0; j < target->req_ring_size; ++j) {
1180 struct srp_request *req = &ch->req_ring[j];
1181
1182 srp_finish_req(ch, req, NULL,
1183 DID_TRANSPORT_FAILFAST << 16);
1184 }
ed9b2264
BVA
1185 }
1186}
aef9ec39 1187
ed9b2264
BVA
1188/*
1189 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1190 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1191 * srp_reset_device() or srp_reset_host() calls will occur while this function
1192 * is in progress. One way to realize that is not to call this function
1193 * directly but to call srp_reconnect_rport() instead since that last function
1194 * serializes calls of this function via rport->mutex and also blocks
1195 * srp_queuecommand() calls before invoking this function.
1196 */
1197static int srp_rport_reconnect(struct srp_rport *rport)
1198{
1199 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1200 struct srp_rdma_ch *ch;
1201 int i, j, ret = 0;
1202 bool multich = false;
09be70a2 1203
aef9ec39 1204 srp_disconnect_target(target);
34aa654e
BVA
1205
1206 if (target->state == SRP_TARGET_SCANNING)
1207 return -ENODEV;
1208
aef9ec39 1209 /*
c7c4e7ff
BVA
1210 * Now get a new local CM ID so that we avoid confusing the target in
1211 * case things are really fouled up. Doing so also ensures that all CM
1212 * callbacks will have finished before a new QP is allocated.
aef9ec39 1213 */
d92c0da7
BVA
1214 for (i = 0; i < target->ch_count; i++) {
1215 ch = &target->ch[i];
1216 if (!ch->target)
1217 break;
1218 ret += srp_new_cm_id(ch);
536ae14e 1219 }
d92c0da7
BVA
1220 for (i = 0; i < target->ch_count; i++) {
1221 ch = &target->ch[i];
1222 if (!ch->target)
1223 break;
1224 for (j = 0; j < target->req_ring_size; ++j) {
1225 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1226
d92c0da7
BVA
1227 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1228 }
1229 }
1230 for (i = 0; i < target->ch_count; i++) {
1231 ch = &target->ch[i];
1232 if (!ch->target)
1233 break;
1234 /*
1235 * Whether or not creating a new CM ID succeeded, create a new
1236 * QP. This guarantees that all completion callback function
1237 * invocations have finished before request resetting starts.
1238 */
1239 ret += srp_create_ch_ib(ch);
aef9ec39 1240
d92c0da7
BVA
1241 INIT_LIST_HEAD(&ch->free_tx);
1242 for (j = 0; j < target->queue_size; ++j)
1243 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1244 }
1245 for (i = 0; i < target->ch_count; i++) {
1246 ch = &target->ch[i];
1247 if (ret || !ch->target) {
1248 if (i > 1)
1249 ret = 0;
1250 break;
1251 }
1252 ret = srp_connect_ch(ch, multich);
1253 multich = true;
1254 }
09be70a2 1255
ed9b2264
BVA
1256 if (ret == 0)
1257 shost_printk(KERN_INFO, target->scsi_host,
1258 PFX "reconnect succeeded\n");
aef9ec39
RD
1259
1260 return ret;
1261}
1262
8f26c9ff
DD
1263static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1264 unsigned int dma_len, u32 rkey)
f5358a17 1265{
8f26c9ff 1266 struct srp_direct_buf *desc = state->desc;
f5358a17 1267
8f26c9ff
DD
1268 desc->va = cpu_to_be64(dma_addr);
1269 desc->key = cpu_to_be32(rkey);
1270 desc->len = cpu_to_be32(dma_len);
f5358a17 1271
8f26c9ff
DD
1272 state->total_len += dma_len;
1273 state->desc++;
1274 state->ndesc++;
1275}
559ce8f1 1276
8f26c9ff 1277static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1278 struct srp_rdma_ch *ch)
8f26c9ff 1279{
8f26c9ff
DD
1280 struct ib_pool_fmr *fmr;
1281 u64 io_addr = 0;
85507bcc 1282
509c07bc 1283 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1284 state->npages, io_addr);
1285 if (IS_ERR(fmr))
1286 return PTR_ERR(fmr);
f5358a17 1287
8f26c9ff 1288 *state->next_fmr++ = fmr;
52ede08f 1289 state->nmdesc++;
f5358a17 1290
52ede08f 1291 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
539dde6f 1292
8f26c9ff
DD
1293 return 0;
1294}
1295
5cfb1782 1296static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1297 struct srp_rdma_ch *ch)
5cfb1782 1298{
509c07bc 1299 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1300 struct srp_device *dev = target->srp_host->srp_dev;
1301 struct ib_send_wr *bad_wr;
1302 struct ib_send_wr wr;
1303 struct srp_fr_desc *desc;
1304 u32 rkey;
1305
509c07bc 1306 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1307 if (!desc)
1308 return -ENOMEM;
1309
1310 rkey = ib_inc_rkey(desc->mr->rkey);
1311 ib_update_fast_reg_key(desc->mr, rkey);
1312
1313 memcpy(desc->frpl->page_list, state->pages,
1314 sizeof(state->pages[0]) * state->npages);
1315
1316 memset(&wr, 0, sizeof(wr));
1317 wr.opcode = IB_WR_FAST_REG_MR;
1318 wr.wr_id = FAST_REG_WR_ID_MASK;
1319 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1320 wr.wr.fast_reg.page_list = desc->frpl;
1321 wr.wr.fast_reg.page_list_len = state->npages;
1322 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1323 wr.wr.fast_reg.length = state->dma_len;
1324 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1325 IB_ACCESS_REMOTE_READ |
1326 IB_ACCESS_REMOTE_WRITE);
1327 wr.wr.fast_reg.rkey = desc->mr->lkey;
1328
1329 *state->next_fr++ = desc;
1330 state->nmdesc++;
1331
1332 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1333 desc->mr->rkey);
1334
509c07bc 1335 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1336}
1337
539dde6f 1338static int srp_finish_mapping(struct srp_map_state *state,
509c07bc 1339 struct srp_rdma_ch *ch)
539dde6f 1340{
509c07bc 1341 struct srp_target_port *target = ch->target;
539dde6f
BVA
1342 int ret = 0;
1343
1344 if (state->npages == 0)
1345 return 0;
1346
b1b8854d 1347 if (state->npages == 1 && !register_always)
52ede08f 1348 srp_map_desc(state, state->base_dma_addr, state->dma_len,
539dde6f
BVA
1349 target->rkey);
1350 else
5cfb1782 1351 ret = target->srp_host->srp_dev->use_fast_reg ?
509c07bc
BVA
1352 srp_map_finish_fr(state, ch) :
1353 srp_map_finish_fmr(state, ch);
539dde6f
BVA
1354
1355 if (ret == 0) {
1356 state->npages = 0;
52ede08f 1357 state->dma_len = 0;
539dde6f
BVA
1358 }
1359
1360 return ret;
1361}
1362
8f26c9ff
DD
1363static void srp_map_update_start(struct srp_map_state *state,
1364 struct scatterlist *sg, int sg_index,
1365 dma_addr_t dma_addr)
1366{
1367 state->unmapped_sg = sg;
1368 state->unmapped_index = sg_index;
1369 state->unmapped_addr = dma_addr;
1370}
85507bcc 1371
8f26c9ff 1372static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1373 struct srp_rdma_ch *ch,
8f26c9ff 1374 struct scatterlist *sg, int sg_index,
5cfb1782 1375 bool use_mr)
8f26c9ff 1376{
509c07bc 1377 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1378 struct srp_device *dev = target->srp_host->srp_dev;
1379 struct ib_device *ibdev = dev->dev;
1380 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1381 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1382 unsigned int len;
1383 int ret;
1384
1385 if (!dma_len)
1386 return 0;
1387
5cfb1782
BVA
1388 if (!use_mr) {
1389 /*
1390 * Once we're in direct map mode for a request, we don't
1391 * go back to FMR or FR mode, so no need to update anything
8f26c9ff
DD
1392 * other than the descriptor.
1393 */
1394 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1395 return 0;
85507bcc 1396 }
f5358a17 1397
5cfb1782
BVA
1398 /*
1399 * Since not all RDMA HW drivers support non-zero page offsets for
1400 * FMR, if we start at an offset into a page, don't merge into the
1401 * current FMR mapping. Finish it out, and use the kernel's MR for
1402 * this sg entry.
8f26c9ff 1403 */
5cfb1782
BVA
1404 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1405 dma_len > dev->mr_max_size) {
509c07bc 1406 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1407 if (ret)
1408 return ret;
1409
1410 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1411 srp_map_update_start(state, NULL, 0, 0);
1412 return 0;
f5358a17
RD
1413 }
1414
5cfb1782
BVA
1415 /*
1416 * If this is the first sg that will be mapped via FMR or via FR, save
1417 * our position. We need to know the first unmapped entry, its index,
1418 * and the first unmapped address within that entry to be able to
1419 * restart mapping after an error.
8f26c9ff
DD
1420 */
1421 if (!state->unmapped_sg)
1422 srp_map_update_start(state, sg, sg_index, dma_addr);
f5358a17 1423
8f26c9ff 1424 while (dma_len) {
5cfb1782
BVA
1425 unsigned offset = dma_addr & ~dev->mr_page_mask;
1426 if (state->npages == dev->max_pages_per_mr || offset != 0) {
509c07bc 1427 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1428 if (ret)
1429 return ret;
f5358a17 1430
8f26c9ff
DD
1431 srp_map_update_start(state, sg, sg_index, dma_addr);
1432 }
1433
5cfb1782 1434 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1435
8f26c9ff
DD
1436 if (!state->npages)
1437 state->base_dma_addr = dma_addr;
5cfb1782 1438 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1439 state->dma_len += len;
8f26c9ff
DD
1440 dma_addr += len;
1441 dma_len -= len;
1442 }
1443
5cfb1782
BVA
1444 /*
1445 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff
DD
1446 * close it out and start a new one -- we can only merge at page
1447 * boundries.
1448 */
1449 ret = 0;
52ede08f 1450 if (len != dev->mr_page_size) {
509c07bc 1451 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1452 if (!ret)
1453 srp_map_update_start(state, NULL, 0, 0);
1454 }
f5358a17
RD
1455 return ret;
1456}
1457
509c07bc
BVA
1458static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1459 struct srp_request *req, struct scatterlist *scat,
1460 int count)
76bc1e1d 1461{
509c07bc 1462 struct srp_target_port *target = ch->target;
76bc1e1d
BVA
1463 struct srp_device *dev = target->srp_host->srp_dev;
1464 struct ib_device *ibdev = dev->dev;
1465 struct scatterlist *sg;
5cfb1782
BVA
1466 int i;
1467 bool use_mr;
76bc1e1d
BVA
1468
1469 state->desc = req->indirect_desc;
1470 state->pages = req->map_page;
5cfb1782
BVA
1471 if (dev->use_fast_reg) {
1472 state->next_fr = req->fr_list;
509c07bc 1473 use_mr = !!ch->fr_pool;
5cfb1782
BVA
1474 } else {
1475 state->next_fmr = req->fmr_list;
509c07bc 1476 use_mr = !!ch->fmr_pool;
5cfb1782 1477 }
76bc1e1d
BVA
1478
1479 for_each_sg(scat, sg, count, i) {
509c07bc 1480 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
5cfb1782
BVA
1481 /*
1482 * Memory registration failed, so backtrack to the
1483 * first unmapped entry and continue on without using
1484 * memory registration.
76bc1e1d
BVA
1485 */
1486 dma_addr_t dma_addr;
1487 unsigned int dma_len;
1488
1489backtrack:
1490 sg = state->unmapped_sg;
1491 i = state->unmapped_index;
1492
1493 dma_addr = ib_sg_dma_address(ibdev, sg);
1494 dma_len = ib_sg_dma_len(ibdev, sg);
1495 dma_len -= (state->unmapped_addr - dma_addr);
1496 dma_addr = state->unmapped_addr;
5cfb1782 1497 use_mr = false;
76bc1e1d
BVA
1498 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1499 }
1500 }
1501
509c07bc 1502 if (use_mr && srp_finish_mapping(state, ch))
76bc1e1d
BVA
1503 goto backtrack;
1504
52ede08f 1505 req->nmdesc = state->nmdesc;
5cfb1782
BVA
1506
1507 return 0;
76bc1e1d
BVA
1508}
1509
509c07bc 1510static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1511 struct srp_request *req)
1512{
509c07bc 1513 struct srp_target_port *target = ch->target;
76bc1e1d 1514 struct scatterlist *scat;
aef9ec39 1515 struct srp_cmd *cmd = req->cmd->buf;
76bc1e1d 1516 int len, nents, count;
85507bcc
RC
1517 struct srp_device *dev;
1518 struct ib_device *ibdev;
8f26c9ff
DD
1519 struct srp_map_state state;
1520 struct srp_indirect_buf *indirect_hdr;
8f26c9ff
DD
1521 u32 table_len;
1522 u8 fmt;
aef9ec39 1523
bb350d1d 1524 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1525 return sizeof (struct srp_cmd);
1526
1527 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1528 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1529 shost_printk(KERN_WARNING, target->scsi_host,
1530 PFX "Unhandled data direction %d\n",
1531 scmnd->sc_data_direction);
aef9ec39
RD
1532 return -EINVAL;
1533 }
1534
bb350d1d
FT
1535 nents = scsi_sg_count(scmnd);
1536 scat = scsi_sglist(scmnd);
aef9ec39 1537
05321937 1538 dev = target->srp_host->srp_dev;
85507bcc
RC
1539 ibdev = dev->dev;
1540
1541 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1542 if (unlikely(count == 0))
1543 return -EIO;
f5358a17
RD
1544
1545 fmt = SRP_DATA_DESC_DIRECT;
1546 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1547
b1b8854d 1548 if (count == 1 && !register_always) {
f5358a17
RD
1549 /*
1550 * The midlayer only generated a single gather/scatter
1551 * entry, or DMA mapping coalesced everything to a
1552 * single entry. So a direct descriptor along with
1553 * the DMA MR suffices.
1554 */
cf368713 1555 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1556
85507bcc 1557 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
9af76271 1558 buf->key = cpu_to_be32(target->rkey);
85507bcc 1559 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1560
52ede08f 1561 req->nmdesc = 0;
8f26c9ff
DD
1562 goto map_complete;
1563 }
1564
5cfb1782
BVA
1565 /*
1566 * We have more than one scatter/gather entry, so build our indirect
1567 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1568 */
1569 indirect_hdr = (void *) cmd->add_data;
1570
c07d424d
DD
1571 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1572 target->indirect_size, DMA_TO_DEVICE);
1573
8f26c9ff 1574 memset(&state, 0, sizeof(state));
509c07bc 1575 srp_map_sg(&state, ch, req, scat, count);
cf368713 1576
c07d424d
DD
1577 /* We've mapped the request, now pull as much of the indirect
1578 * descriptor table as we can into the command buffer. If this
1579 * target is not using an external indirect table, we are
1580 * guaranteed to fit into the command, as the SCSI layer won't
1581 * give us more S/G entries than we allow.
8f26c9ff 1582 */
8f26c9ff 1583 if (state.ndesc == 1) {
5cfb1782
BVA
1584 /*
1585 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1586 * so use a direct descriptor.
1587 */
1588 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1589
c07d424d 1590 *buf = req->indirect_desc[0];
8f26c9ff 1591 goto map_complete;
aef9ec39
RD
1592 }
1593
c07d424d
DD
1594 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1595 !target->allow_ext_sg)) {
1596 shost_printk(KERN_ERR, target->scsi_host,
1597 "Could not fit S/G list into SRP_CMD\n");
1598 return -EIO;
1599 }
1600
1601 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff
DD
1602 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1603
1604 fmt = SRP_DATA_DESC_INDIRECT;
1605 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1606 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1607
c07d424d
DD
1608 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1609 count * sizeof (struct srp_direct_buf));
8f26c9ff 1610
c07d424d 1611 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
8f26c9ff
DD
1612 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1613 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1614 indirect_hdr->len = cpu_to_be32(state.total_len);
1615
1616 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1617 cmd->data_out_desc_cnt = count;
8f26c9ff 1618 else
c07d424d
DD
1619 cmd->data_in_desc_cnt = count;
1620
1621 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1622 DMA_TO_DEVICE);
8f26c9ff
DD
1623
1624map_complete:
aef9ec39
RD
1625 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1626 cmd->buf_fmt = fmt << 4;
1627 else
1628 cmd->buf_fmt = fmt;
1629
aef9ec39
RD
1630 return len;
1631}
1632
76c75b25
BVA
1633/*
1634 * Return an IU and possible credit to the free pool
1635 */
509c07bc 1636static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1637 enum srp_iu_type iu_type)
1638{
1639 unsigned long flags;
1640
509c07bc
BVA
1641 spin_lock_irqsave(&ch->lock, flags);
1642 list_add(&iu->list, &ch->free_tx);
76c75b25 1643 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1644 ++ch->req_lim;
1645 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1646}
1647
05a1d750 1648/*
509c07bc 1649 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1650 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1651 *
1652 * Note:
1653 * An upper limit for the number of allocated information units for each
1654 * request type is:
1655 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1656 * more than Scsi_Host.can_queue requests.
1657 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1658 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1659 * one unanswered SRP request to an initiator.
1660 */
509c07bc 1661static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1662 enum srp_iu_type iu_type)
1663{
509c07bc 1664 struct srp_target_port *target = ch->target;
05a1d750
DD
1665 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1666 struct srp_iu *iu;
1667
509c07bc 1668 srp_send_completion(ch->send_cq, ch);
05a1d750 1669
509c07bc 1670 if (list_empty(&ch->free_tx))
05a1d750
DD
1671 return NULL;
1672
1673 /* Initiator responses to target requests do not consume credits */
76c75b25 1674 if (iu_type != SRP_IU_RSP) {
509c07bc 1675 if (ch->req_lim <= rsv) {
76c75b25
BVA
1676 ++target->zero_req_lim;
1677 return NULL;
1678 }
1679
509c07bc 1680 --ch->req_lim;
05a1d750
DD
1681 }
1682
509c07bc 1683 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1684 list_del(&iu->list);
05a1d750
DD
1685 return iu;
1686}
1687
509c07bc 1688static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1689{
509c07bc 1690 struct srp_target_port *target = ch->target;
05a1d750
DD
1691 struct ib_sge list;
1692 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1693
1694 list.addr = iu->dma;
1695 list.length = len;
9af76271 1696 list.lkey = target->lkey;
05a1d750
DD
1697
1698 wr.next = NULL;
dcb4cb85 1699 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1700 wr.sg_list = &list;
1701 wr.num_sge = 1;
1702 wr.opcode = IB_WR_SEND;
1703 wr.send_flags = IB_SEND_SIGNALED;
1704
509c07bc 1705 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1706}
1707
509c07bc 1708static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1709{
509c07bc 1710 struct srp_target_port *target = ch->target;
c996bb47 1711 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1712 struct ib_sge list;
c996bb47
BVA
1713
1714 list.addr = iu->dma;
1715 list.length = iu->size;
9af76271 1716 list.lkey = target->lkey;
c996bb47
BVA
1717
1718 wr.next = NULL;
dcb4cb85 1719 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1720 wr.sg_list = &list;
1721 wr.num_sge = 1;
1722
509c07bc 1723 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1724}
1725
509c07bc 1726static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1727{
509c07bc 1728 struct srp_target_port *target = ch->target;
aef9ec39
RD
1729 struct srp_request *req;
1730 struct scsi_cmnd *scmnd;
1731 unsigned long flags;
aef9ec39 1732
aef9ec39 1733 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1734 spin_lock_irqsave(&ch->lock, flags);
1735 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1736 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1737
509c07bc 1738 ch->tsk_mgmt_status = -1;
f8b6e31e 1739 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1740 ch->tsk_mgmt_status = rsp->data[3];
1741 complete(&ch->tsk_mgmt_done);
aef9ec39 1742 } else {
77f2c1a4
BVA
1743 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1744 if (scmnd) {
1745 req = (void *)scmnd->host_scribble;
1746 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1747 }
22032991 1748 if (!scmnd) {
7aa54bd7 1749 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1750 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1751 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1752
509c07bc
BVA
1753 spin_lock_irqsave(&ch->lock, flags);
1754 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1755 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1756
1757 return;
1758 }
aef9ec39
RD
1759 scmnd->result = rsp->status;
1760
1761 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1762 memcpy(scmnd->sense_buffer, rsp->data +
1763 be32_to_cpu(rsp->resp_data_len),
1764 min_t(int, be32_to_cpu(rsp->sense_data_len),
1765 SCSI_SENSE_BUFFERSIZE));
1766 }
1767
e714531a 1768 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1769 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1770 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1771 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1772 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1773 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1775 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1776
509c07bc 1777 srp_free_req(ch, req, scmnd,
22032991
BVA
1778 be32_to_cpu(rsp->req_lim_delta));
1779
f8b6e31e
DD
1780 scmnd->host_scribble = NULL;
1781 scmnd->scsi_done(scmnd);
aef9ec39 1782 }
aef9ec39
RD
1783}
1784
509c07bc 1785static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1786 void *rsp, int len)
1787{
509c07bc 1788 struct srp_target_port *target = ch->target;
76c75b25 1789 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1790 unsigned long flags;
1791 struct srp_iu *iu;
76c75b25 1792 int err;
bb12588a 1793
509c07bc
BVA
1794 spin_lock_irqsave(&ch->lock, flags);
1795 ch->req_lim += req_delta;
1796 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1797 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1798
bb12588a
DD
1799 if (!iu) {
1800 shost_printk(KERN_ERR, target->scsi_host, PFX
1801 "no IU available to send response\n");
76c75b25 1802 return 1;
bb12588a
DD
1803 }
1804
1805 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1806 memcpy(iu->buf, rsp, len);
1807 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1808
509c07bc 1809 err = srp_post_send(ch, iu, len);
76c75b25 1810 if (err) {
bb12588a
DD
1811 shost_printk(KERN_ERR, target->scsi_host, PFX
1812 "unable to post response: %d\n", err);
509c07bc 1813 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1814 }
bb12588a 1815
bb12588a
DD
1816 return err;
1817}
1818
509c07bc 1819static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1820 struct srp_cred_req *req)
1821{
1822 struct srp_cred_rsp rsp = {
1823 .opcode = SRP_CRED_RSP,
1824 .tag = req->tag,
1825 };
1826 s32 delta = be32_to_cpu(req->req_lim_delta);
1827
509c07bc
BVA
1828 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1829 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1830 "problems processing SRP_CRED_REQ\n");
1831}
1832
509c07bc 1833static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1834 struct srp_aer_req *req)
1835{
509c07bc 1836 struct srp_target_port *target = ch->target;
bb12588a
DD
1837 struct srp_aer_rsp rsp = {
1838 .opcode = SRP_AER_RSP,
1839 .tag = req->tag,
1840 };
1841 s32 delta = be32_to_cpu(req->req_lim_delta);
1842
1843 shost_printk(KERN_ERR, target->scsi_host, PFX
1844 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1845
509c07bc 1846 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1847 shost_printk(KERN_ERR, target->scsi_host, PFX
1848 "problems processing SRP_AER_REQ\n");
1849}
1850
509c07bc 1851static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1852{
509c07bc 1853 struct srp_target_port *target = ch->target;
dcb4cb85 1854 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1855 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1856 int res;
aef9ec39
RD
1857 u8 opcode;
1858
509c07bc 1859 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1860 DMA_FROM_DEVICE);
aef9ec39
RD
1861
1862 opcode = *(u8 *) iu->buf;
1863
1864 if (0) {
7aa54bd7
DD
1865 shost_printk(KERN_ERR, target->scsi_host,
1866 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1867 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1868 iu->buf, wc->byte_len, true);
aef9ec39
RD
1869 }
1870
1871 switch (opcode) {
1872 case SRP_RSP:
509c07bc 1873 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1874 break;
1875
bb12588a 1876 case SRP_CRED_REQ:
509c07bc 1877 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1878 break;
1879
1880 case SRP_AER_REQ:
509c07bc 1881 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1882 break;
1883
aef9ec39
RD
1884 case SRP_T_LOGOUT:
1885 /* XXX Handle target logout */
7aa54bd7
DD
1886 shost_printk(KERN_WARNING, target->scsi_host,
1887 PFX "Got target logout request\n");
aef9ec39
RD
1888 break;
1889
1890 default:
7aa54bd7
DD
1891 shost_printk(KERN_WARNING, target->scsi_host,
1892 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1893 break;
1894 }
1895
509c07bc 1896 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1897 DMA_FROM_DEVICE);
c996bb47 1898
509c07bc 1899 res = srp_post_recv(ch, iu);
c996bb47
BVA
1900 if (res != 0)
1901 shost_printk(KERN_ERR, target->scsi_host,
1902 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1903}
1904
c1120f89
BVA
1905/**
1906 * srp_tl_err_work() - handle a transport layer error
af24663b 1907 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1908 *
1909 * Note: This function may get invoked before the rport has been created,
1910 * hence the target->rport test.
1911 */
1912static void srp_tl_err_work(struct work_struct *work)
1913{
1914 struct srp_target_port *target;
1915
1916 target = container_of(work, struct srp_target_port, tl_err_work);
1917 if (target->rport)
1918 srp_start_tl_fail_timers(target->rport);
1919}
1920
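/*
 * Note: wc->wr_id as tested below either encodes a pointer to the srp_iu
 * that completed, carries the SRP_LAST_WR_ID marker posted while draining
 * a queue pair, or is flagged as a memory-registration work request via
 * LOCAL_INV_WR_ID_MASK / FAST_REG_WR_ID_MASK.
 */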
5cfb1782 1921static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
7dad6b2e 1922 bool send_err, struct srp_rdma_ch *ch)
948d1e88 1923{
7dad6b2e
BVA
1924 struct srp_target_port *target = ch->target;
1925
1926 if (wr_id == SRP_LAST_WR_ID) {
1927 complete(&ch->done);
1928 return;
1929 }
1930
294c875a 1931 if (target->connected && !target->qp_in_error) {
5cfb1782
BVA
1932 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1933 shost_printk(KERN_ERR, target->scsi_host, PFX
1934 "LOCAL_INV failed with status %d\n",
1935 wc_status);
1936 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1937 shost_printk(KERN_ERR, target->scsi_host, PFX
1938 "FAST_REG_MR failed status %d\n",
1939 wc_status);
1940 } else {
1941 shost_printk(KERN_ERR, target->scsi_host,
1942 PFX "failed %s status %d for iu %p\n",
1943 send_err ? "send" : "receive",
1944 wc_status, (void *)(uintptr_t)wr_id);
1945 }
c1120f89 1946 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 1947 }
948d1e88
BVA
1948 target->qp_in_error = true;
1949}
1950
509c07bc 1951static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 1952{
509c07bc 1953 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 1954 struct ib_wc wc;
aef9ec39
RD
1955
1956 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1957 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 1958 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 1959 srp_handle_recv(ch, &wc);
948d1e88 1960 } else {
7dad6b2e 1961 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
aef9ec39 1962 }
9c03dc9f
BVA
1963 }
1964}
1965
509c07bc 1966static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 1967{
509c07bc 1968 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 1969 struct ib_wc wc;
dcb4cb85 1970 struct srp_iu *iu;
9c03dc9f
BVA
1971
1972 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1973 if (likely(wc.status == IB_WC_SUCCESS)) {
1974 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 1975 list_add(&iu->list, &ch->free_tx);
948d1e88 1976 } else {
7dad6b2e 1977 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
9c03dc9f 1978 }
aef9ec39
RD
1979 }
1980}
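/*
 * Besides this completion-handler path, the send CQ is also polled
 * opportunistically from __srp_get_tx_iu(), so completed send IUs are
 * recycled even when completion interrupts lag behind command submission.
 */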
1981
76c75b25 1982static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 1983{
76c75b25 1984 struct srp_target_port *target = host_to_target(shost);
a95cadb9 1985 struct srp_rport *rport = target->rport;
509c07bc 1986 struct srp_rdma_ch *ch;
aef9ec39
RD
1987 struct srp_request *req;
1988 struct srp_iu *iu;
1989 struct srp_cmd *cmd;
85507bcc 1990 struct ib_device *dev;
76c75b25 1991 unsigned long flags;
77f2c1a4
BVA
1992 u32 tag;
1993 u16 idx;
d1b4289e 1994 int len, ret;
a95cadb9
BVA
1995 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1996
1997 /*
1998 * The SCSI EH thread is the only context from which srp_queuecommand()
1999 * can get invoked for blocked devices (SDEV_BLOCK /
2000 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2001 * locking the rport mutex if invoked from inside the SCSI EH.
2002 */
2003 if (in_scsi_eh)
2004 mutex_lock(&rport->mutex);
aef9ec39 2005
d1b4289e
BVA
2006 scmnd->result = srp_chkready(target->rport);
2007 if (unlikely(scmnd->result))
2008 goto err;
2ce19e72 2009
77f2c1a4
BVA
2010 WARN_ON_ONCE(scmnd->request->tag < 0);
2011 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2012 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2013 idx = blk_mq_unique_tag_to_tag(tag);
2014 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2015 dev_name(&shost->shost_gendev), tag, idx,
2016 target->req_ring_size);
509c07bc
BVA
2017
2018 spin_lock_irqsave(&ch->lock, flags);
2019 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2020 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2021
77f2c1a4
BVA
2022 if (!iu)
2023 goto err;
2024
2025 req = &ch->req_ring[idx];
05321937 2026 dev = target->srp_host->srp_dev->dev;
49248644 2027 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2028 DMA_TO_DEVICE);
aef9ec39 2029
f8b6e31e 2030 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2031
2032 cmd = iu->buf;
2033 memset(cmd, 0, sizeof *cmd);
2034
2035 cmd->opcode = SRP_CMD;
2036 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
77f2c1a4 2037 cmd->tag = tag;
aef9ec39
RD
2038 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2039
aef9ec39
RD
2040 req->scmnd = scmnd;
2041 req->cmd = iu;
aef9ec39 2042
509c07bc 2043 len = srp_map_data(scmnd, ch, req);
aef9ec39 2044 if (len < 0) {
7aa54bd7 2045 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2046 PFX "Failed to map data (%d)\n", len);
2047 /*
2048 * If we ran out of memory descriptors (-ENOMEM) because an
2049 * application is queuing many requests with more than
52ede08f 2050 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2051 * to reduce the queue depth temporarily.
2052 */
2053 scmnd->result = len == -ENOMEM ?
2054 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2055 goto err_iu;
aef9ec39
RD
2056 }
2057
49248644 2058 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2059 DMA_TO_DEVICE);
aef9ec39 2060
509c07bc 2061 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2062 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2063 goto err_unmap;
2064 }
2065
d1b4289e
BVA
2066 ret = 0;
2067
a95cadb9
BVA
2068unlock_rport:
2069 if (in_scsi_eh)
2070 mutex_unlock(&rport->mutex);
2071
d1b4289e 2072 return ret;
aef9ec39
RD
2073
2074err_unmap:
509c07bc 2075 srp_unmap_data(scmnd, ch, req);
aef9ec39 2076
76c75b25 2077err_iu:
509c07bc 2078 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2079
024ca901
BVA
2080 /*
2081 * Prevent the loops that iterate over the request ring from
2082 * encountering a dangling SCSI command pointer.
2083 */
2084 req->scmnd = NULL;
2085
d1b4289e
BVA
2086err:
2087 if (scmnd->result) {
2088 scmnd->scsi_done(scmnd);
2089 ret = 0;
2090 } else {
2091 ret = SCSI_MLQUEUE_HOST_BUSY;
2092 }
a95cadb9 2093
d1b4289e 2094 goto unlock_rport;
aef9ec39
RD
2095}
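/*
 * Illustrative sketch (not compiled): how the blk-mq tag used in
 * srp_queuecommand() is decomposed. blk_mq_unique_tag() packs the
 * hardware-queue index into the upper 16 bits and the per-queue tag into
 * the lower 16 bits, so a value such as 0x0002000a (assumed here purely
 * for illustration) selects target->ch[2] and req_ring slot 10.
 */
#if 0
	u32 tag = blk_mq_unique_tag(scmnd->request);	/* e.g. 0x0002000a */
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);	/* 2: the RDMA channel */
	u16 idx = blk_mq_unique_tag_to_tag(tag);	/* 0xa: request ring slot */
#endif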
2096
4d73f95f
BVA
2097/*
2098 * Note: the resources allocated in this function are freed in
509c07bc 2099 * srp_free_ch_ib().
4d73f95f 2100 */
509c07bc 2101static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2102{
509c07bc 2103 struct srp_target_port *target = ch->target;
aef9ec39
RD
2104 int i;
2105
509c07bc
BVA
2106 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2107 GFP_KERNEL);
2108 if (!ch->rx_ring)
4d73f95f 2109 goto err_no_ring;
509c07bc
BVA
2110 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2111 GFP_KERNEL);
2112 if (!ch->tx_ring)
4d73f95f
BVA
2113 goto err_no_ring;
2114
2115 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2116 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2117 ch->max_ti_iu_len,
2118 GFP_KERNEL, DMA_FROM_DEVICE);
2119 if (!ch->rx_ring[i])
aef9ec39
RD
2120 goto err;
2121 }
2122
4d73f95f 2123 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2124 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2125 target->max_iu_len,
2126 GFP_KERNEL, DMA_TO_DEVICE);
2127 if (!ch->tx_ring[i])
aef9ec39 2128 goto err;
dcb4cb85 2129
509c07bc 2130 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2131 }
2132
2133 return 0;
2134
2135err:
4d73f95f 2136 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2137 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2138 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2139 }
2140
4d73f95f
BVA
2141
2142err_no_ring:
509c07bc
BVA
2143 kfree(ch->tx_ring);
2144 ch->tx_ring = NULL;
2145 kfree(ch->rx_ring);
2146 ch->rx_ring = NULL;
4d73f95f 2147
aef9ec39
RD
2148 return -ENOMEM;
2149}
2150
c9b03c1a
BVA
2151static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2152{
2153 uint64_t T_tr_ns, max_compl_time_ms;
2154 uint32_t rq_tmo_jiffies;
2155
2156 /*
2157 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2158 * table 91), both the QP timeout and the retry count have to be set
2159 * for RC QPs during the RTR to RTS transition.
2160 */
2161 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2162 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2163
2164 /*
2165 * Set target->rq_tmo_jiffies to one second more than the largest time
2166 * it can take before an error completion is generated. See also
2167 * C9-140..142 in the IBTA spec for more information about how to
2168 * convert the QP Local ACK Timeout value to nanoseconds.
2169 */
2170 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2171 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2172 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2173 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2174
2175 return rq_tmo_jiffies;
2176}
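/*
 * Worked example (illustrative values): with qp_attr->timeout = 19 and
 * qp_attr->retry_cnt = 7, T_tr = 4096 ns * 2^19 ~= 2.15 s, so the final
 * retry can complete up to 7 * 4 * 2.15 s ~= 60 s after the request was
 * posted, and the resulting timeout is about 61 s. srp_slave_configure()
 * later uses this value to size the block-layer request timeout.
 */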
2177
961e0be8
DD
2178static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2179 struct srp_login_rsp *lrsp,
509c07bc 2180 struct srp_rdma_ch *ch)
961e0be8 2181{
509c07bc 2182 struct srp_target_port *target = ch->target;
961e0be8
DD
2183 struct ib_qp_attr *qp_attr = NULL;
2184 int attr_mask = 0;
2185 int ret;
2186 int i;
2187
2188 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2189 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2190 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2191
2192 /*
2193 * Reserve credits for task management so we don't
2194 * bounce requests back to the SCSI mid-layer.
2195 */
2196 target->scsi_host->can_queue
509c07bc 2197 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2198 target->scsi_host->can_queue);
4d73f95f
BVA
2199 target->scsi_host->cmd_per_lun
2200 = min_t(int, target->scsi_host->can_queue,
2201 target->scsi_host->cmd_per_lun);
961e0be8
DD
2202 } else {
2203 shost_printk(KERN_WARNING, target->scsi_host,
2204 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2205 ret = -ECONNRESET;
2206 goto error;
2207 }
2208
509c07bc
BVA
2209 if (!ch->rx_ring) {
2210 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2211 if (ret)
2212 goto error;
2213 }
2214
2215 ret = -ENOMEM;
2216 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2217 if (!qp_attr)
2218 goto error;
2219
2220 qp_attr->qp_state = IB_QPS_RTR;
2221 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2222 if (ret)
2223 goto error_free;
2224
509c07bc 2225 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2226 if (ret)
2227 goto error_free;
2228
4d73f95f 2229 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2230 struct srp_iu *iu = ch->rx_ring[i];
2231
2232 ret = srp_post_recv(ch, iu);
961e0be8
DD
2233 if (ret)
2234 goto error_free;
2235 }
2236
2237 qp_attr->qp_state = IB_QPS_RTS;
2238 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2239 if (ret)
2240 goto error_free;
2241
c9b03c1a
BVA
2242 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2243
509c07bc 2244 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2245 if (ret)
2246 goto error_free;
2247
2248 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2249
2250error_free:
2251 kfree(qp_attr);
2252
2253error:
509c07bc 2254 ch->status = ret;
961e0be8
DD
2255}
2256
aef9ec39
RD
2257static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2258 struct ib_cm_event *event,
509c07bc 2259 struct srp_rdma_ch *ch)
aef9ec39 2260{
509c07bc 2261 struct srp_target_port *target = ch->target;
7aa54bd7 2262 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2263 struct ib_class_port_info *cpi;
2264 int opcode;
2265
2266 switch (event->param.rej_rcvd.reason) {
2267 case IB_CM_REJ_PORT_CM_REDIRECT:
2268 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2269 ch->path.dlid = cpi->redirect_lid;
2270 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2271 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2272 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2273
509c07bc 2274 ch->status = ch->path.dlid ?
aef9ec39
RD
2275 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2276 break;
2277
2278 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2279 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2280 /*
2281 * Topspin/Cisco SRP gateways incorrectly send
2282 * reject reason code 25 when they mean 24
2283 * (port redirect).
2284 */
509c07bc 2285 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2286 event->param.rej_rcvd.ari, 16);
2287
7aa54bd7
DD
2288 shost_printk(KERN_DEBUG, shost,
2289 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2290 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2291 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2292
509c07bc 2293 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2294 } else {
7aa54bd7
DD
2295 shost_printk(KERN_WARNING, shost,
2296 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2297 ch->status = -ECONNRESET;
aef9ec39
RD
2298 }
2299 break;
2300
2301 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2302 shost_printk(KERN_WARNING, shost,
2303 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2304 ch->status = -ECONNRESET;
aef9ec39
RD
2305 break;
2306
2307 case IB_CM_REJ_CONSUMER_DEFINED:
2308 opcode = *(u8 *) event->private_data;
2309 if (opcode == SRP_LOGIN_REJ) {
2310 struct srp_login_rej *rej = event->private_data;
2311 u32 reason = be32_to_cpu(rej->reason);
2312
2313 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2314 shost_printk(KERN_WARNING, shost,
2315 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2316 else
e7ffde01
BVA
2317 shost_printk(KERN_WARNING, shost, PFX
2318 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2319 target->sgid.raw,
2320 target->orig_dgid.raw, reason);
aef9ec39 2321 } else
7aa54bd7
DD
2322 shost_printk(KERN_WARNING, shost,
2323 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2324 " opcode 0x%02x\n", opcode);
509c07bc 2325 ch->status = -ECONNRESET;
aef9ec39
RD
2326 break;
2327
9fe4bcf4
DD
2328 case IB_CM_REJ_STALE_CONN:
2329 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2330 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2331 break;
2332
aef9ec39 2333 default:
7aa54bd7
DD
2334 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2335 event->param.rej_rcvd.reason);
509c07bc 2336 ch->status = -ECONNRESET;
aef9ec39
RD
2337 }
2338}
2339
2340static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2341{
509c07bc
BVA
2342 struct srp_rdma_ch *ch = cm_id->context;
2343 struct srp_target_port *target = ch->target;
aef9ec39 2344 int comp = 0;
aef9ec39
RD
2345
2346 switch (event->event) {
2347 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2348 shost_printk(KERN_DEBUG, target->scsi_host,
2349 PFX "Sending CM REQ failed\n");
aef9ec39 2350 comp = 1;
509c07bc 2351 ch->status = -ECONNRESET;
aef9ec39
RD
2352 break;
2353
2354 case IB_CM_REP_RECEIVED:
2355 comp = 1;
509c07bc 2356 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2357 break;
2358
2359 case IB_CM_REJ_RECEIVED:
7aa54bd7 2360 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2361 comp = 1;
2362
509c07bc 2363 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2364 break;
2365
b7ac4ab4 2366 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2367 shost_printk(KERN_WARNING, target->scsi_host,
2368 PFX "DREQ received - connection closed\n");
294c875a 2369 srp_change_conn_state(target, false);
b7ac4ab4 2370 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2371 shost_printk(KERN_ERR, target->scsi_host,
2372 PFX "Sending CM DREP failed\n");
c1120f89 2373 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2374 break;
2375
2376 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2377 shost_printk(KERN_ERR, target->scsi_host,
2378 PFX "connection closed\n");
ac72d766 2379 comp = 1;
aef9ec39 2380
509c07bc 2381 ch->status = 0;
aef9ec39
RD
2382 break;
2383
b7ac4ab4
IR
2384 case IB_CM_MRA_RECEIVED:
2385 case IB_CM_DREQ_ERROR:
2386 case IB_CM_DREP_RECEIVED:
2387 break;
2388
aef9ec39 2389 default:
7aa54bd7
DD
2390 shost_printk(KERN_WARNING, target->scsi_host,
2391 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2392 break;
2393 }
2394
2395 if (comp)
509c07bc 2396 complete(&ch->done);
aef9ec39 2397
aef9ec39
RD
2398 return 0;
2399}
2400
71444b97
JW
2401/**
2402 * srp_change_queue_depth - set the device queue depth
2403 * @sdev: scsi device struct
2404 * @qdepth: requested queue depth
71444b97
JW
2405 *
2406 * Returns the new queue depth.
2407 */
2408static int
db5ed4df 2409srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2410{
c40ecc12 2411 if (!sdev->tagged_supported)
1e6f2416 2412 qdepth = 1;
db5ed4df 2413 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2414}
2415
509c07bc
BVA
2416static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2417 unsigned int lun, u8 func)
aef9ec39 2418{
509c07bc 2419 struct srp_target_port *target = ch->target;
a95cadb9 2420 struct srp_rport *rport = target->rport;
19081f31 2421 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2422 struct srp_iu *iu;
2423 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2424
3780d1f0
BVA
2425 if (!target->connected || target->qp_in_error)
2426 return -1;
2427
509c07bc 2428 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2429
a95cadb9 2430 /*
509c07bc 2431 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2432 * invoked while a task management function is being sent.
2433 */
2434 mutex_lock(&rport->mutex);
509c07bc
BVA
2435 spin_lock_irq(&ch->lock);
2436 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2437 spin_unlock_irq(&ch->lock);
76c75b25 2438
a95cadb9
BVA
2439 if (!iu) {
2440 mutex_unlock(&rport->mutex);
2441
76c75b25 2442 return -1;
a95cadb9 2443 }
aef9ec39 2444
19081f31
DD
2445 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2446 DMA_TO_DEVICE);
aef9ec39
RD
2447 tsk_mgmt = iu->buf;
2448 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2449
2450 tsk_mgmt->opcode = SRP_TSK_MGMT;
f8b6e31e
DD
2451 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2452 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2453 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2454 tsk_mgmt->task_tag = req_tag;
aef9ec39 2455
19081f31
DD
2456 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2457 DMA_TO_DEVICE);
509c07bc
BVA
2458 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2459 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2460 mutex_unlock(&rport->mutex);
2461
76c75b25
BVA
2462 return -1;
2463 }
a95cadb9 2464 mutex_unlock(&rport->mutex);
d945e1df 2465
509c07bc 2466 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2467 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2468 return -1;
aef9ec39 2469
d945e1df 2470 return 0;
d945e1df
RD
2471}
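/*
 * Note: the SRP_TAG_TSK_MGMT bit set in the tag above is what allows
 * srp_process_rsp() to distinguish task-management responses from
 * regular SCSI command responses.
 */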
2472
aef9ec39
RD
2473static int srp_abort(struct scsi_cmnd *scmnd)
2474{
d945e1df 2475 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2476 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2477 u32 tag;
d92c0da7 2478 u16 ch_idx;
509c07bc 2479 struct srp_rdma_ch *ch;
086f44f5 2480 int ret;
d945e1df 2481
7aa54bd7 2482 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2483
d92c0da7 2484 if (!req)
99b6697a 2485 return SUCCESS;
77f2c1a4 2486 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2487 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2488 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2489 return SUCCESS;
2490 ch = &target->ch[ch_idx];
2491 if (!srp_claim_req(ch, req, NULL, scmnd))
2492 return SUCCESS;
2493 shost_printk(KERN_ERR, target->scsi_host,
2494 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2495 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2496 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2497 ret = SUCCESS;
ed9b2264 2498 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2499 ret = FAST_IO_FAIL;
086f44f5
BVA
2500 else
2501 ret = FAILED;
509c07bc 2502 srp_free_req(ch, req, scmnd, 0);
22032991 2503 scmnd->result = DID_ABORT << 16;
d8536670 2504 scmnd->scsi_done(scmnd);
d945e1df 2505
086f44f5 2506 return ret;
aef9ec39
RD
2507}
2508
2509static int srp_reset_device(struct scsi_cmnd *scmnd)
2510{
d945e1df 2511 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2512 struct srp_rdma_ch *ch;
536ae14e 2513 int i, j;
d945e1df 2514
7aa54bd7 2515 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2516
d92c0da7 2517 ch = &target->ch[0];
509c07bc 2518 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2519 SRP_TSK_LUN_RESET))
d945e1df 2520 return FAILED;
509c07bc 2521 if (ch->tsk_mgmt_status)
d945e1df
RD
2522 return FAILED;
2523
d92c0da7
BVA
2524 for (i = 0; i < target->ch_count; i++) {
2525 ch = &target->ch[i];
2526 for (j = 0; j < target->req_ring_size; ++j) {
2527 struct srp_request *req = &ch->req_ring[j];
509c07bc 2528
d92c0da7
BVA
2529 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2530 }
536ae14e 2531 }
d945e1df 2532
d945e1df 2533 return SUCCESS;
aef9ec39
RD
2534}
2535
2536static int srp_reset_host(struct scsi_cmnd *scmnd)
2537{
2538 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2539
7aa54bd7 2540 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2541
ed9b2264 2542 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2543}
2544
c9b03c1a
BVA
2545static int srp_slave_configure(struct scsi_device *sdev)
2546{
2547 struct Scsi_Host *shost = sdev->host;
2548 struct srp_target_port *target = host_to_target(shost);
2549 struct request_queue *q = sdev->request_queue;
2550 unsigned long timeout;
2551
2552 if (sdev->type == TYPE_DISK) {
2553 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2554 blk_queue_rq_timeout(q, timeout);
2555 }
2556
2557 return 0;
2558}
2559
ee959b00
TJ
2560static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2561 char *buf)
6ecb0c84 2562{
ee959b00 2563 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2564
6ecb0c84
RD
2565 return sprintf(buf, "0x%016llx\n",
2566 (unsigned long long) be64_to_cpu(target->id_ext));
2567}
2568
ee959b00
TJ
2569static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2570 char *buf)
6ecb0c84 2571{
ee959b00 2572 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2573
6ecb0c84
RD
2574 return sprintf(buf, "0x%016llx\n",
2575 (unsigned long long) be64_to_cpu(target->ioc_guid));
2576}
2577
ee959b00
TJ
2578static ssize_t show_service_id(struct device *dev,
2579 struct device_attribute *attr, char *buf)
6ecb0c84 2580{
ee959b00 2581 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2582
6ecb0c84
RD
2583 return sprintf(buf, "0x%016llx\n",
2584 (unsigned long long) be64_to_cpu(target->service_id));
2585}
2586
ee959b00
TJ
2587static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2588 char *buf)
6ecb0c84 2589{
ee959b00 2590 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2591
747fe000 2592 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2593}
2594
848b3082
BVA
2595static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2596 char *buf)
2597{
2598 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2599
747fe000 2600 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2601}
2602
ee959b00
TJ
2603static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2604 char *buf)
6ecb0c84 2605{
ee959b00 2606 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2607 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2608
509c07bc 2609 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2610}
2611
ee959b00
TJ
2612static ssize_t show_orig_dgid(struct device *dev,
2613 struct device_attribute *attr, char *buf)
3633b3d0 2614{
ee959b00 2615 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2616
747fe000 2617 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2618}
2619
89de7486
BVA
2620static ssize_t show_req_lim(struct device *dev,
2621 struct device_attribute *attr, char *buf)
2622{
2623 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2624 struct srp_rdma_ch *ch;
2625 int i, req_lim = INT_MAX;
89de7486 2626
d92c0da7
BVA
2627 for (i = 0; i < target->ch_count; i++) {
2628 ch = &target->ch[i];
2629 req_lim = min(req_lim, ch->req_lim);
2630 }
2631 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2632}
2633
ee959b00
TJ
2634static ssize_t show_zero_req_lim(struct device *dev,
2635 struct device_attribute *attr, char *buf)
6bfa24fa 2636{
ee959b00 2637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2638
6bfa24fa
RD
2639 return sprintf(buf, "%d\n", target->zero_req_lim);
2640}
2641
ee959b00
TJ
2642static ssize_t show_local_ib_port(struct device *dev,
2643 struct device_attribute *attr, char *buf)
ded7f1a1 2644{
ee959b00 2645 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2646
2647 return sprintf(buf, "%d\n", target->srp_host->port);
2648}
2649
ee959b00
TJ
2650static ssize_t show_local_ib_device(struct device *dev,
2651 struct device_attribute *attr, char *buf)
ded7f1a1 2652{
ee959b00 2653 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2654
05321937 2655 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2656}
2657
d92c0da7
BVA
2658static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2659 char *buf)
2660{
2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2662
2663 return sprintf(buf, "%d\n", target->ch_count);
2664}
2665
4b5e5f41
BVA
2666static ssize_t show_comp_vector(struct device *dev,
2667 struct device_attribute *attr, char *buf)
2668{
2669 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2670
2671 return sprintf(buf, "%d\n", target->comp_vector);
2672}
2673
7bb312e4
VP
2674static ssize_t show_tl_retry_count(struct device *dev,
2675 struct device_attribute *attr, char *buf)
2676{
2677 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2678
2679 return sprintf(buf, "%d\n", target->tl_retry_count);
2680}
2681
49248644
DD
2682static ssize_t show_cmd_sg_entries(struct device *dev,
2683 struct device_attribute *attr, char *buf)
2684{
2685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2686
2687 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2688}
2689
c07d424d
DD
2690static ssize_t show_allow_ext_sg(struct device *dev,
2691 struct device_attribute *attr, char *buf)
2692{
2693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2694
2695 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2696}
2697
ee959b00
TJ
2698static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2699static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2700static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2701static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2702static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2703static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2704static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2705static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2706static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2707static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2708static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2709static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2710static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2711static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2712static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2713static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2714
2715static struct device_attribute *srp_host_attrs[] = {
2716 &dev_attr_id_ext,
2717 &dev_attr_ioc_guid,
2718 &dev_attr_service_id,
2719 &dev_attr_pkey,
848b3082 2720 &dev_attr_sgid,
ee959b00
TJ
2721 &dev_attr_dgid,
2722 &dev_attr_orig_dgid,
89de7486 2723 &dev_attr_req_lim,
ee959b00
TJ
2724 &dev_attr_zero_req_lim,
2725 &dev_attr_local_ib_port,
2726 &dev_attr_local_ib_device,
d92c0da7 2727 &dev_attr_ch_count,
4b5e5f41 2728 &dev_attr_comp_vector,
7bb312e4 2729 &dev_attr_tl_retry_count,
49248644 2730 &dev_attr_cmd_sg_entries,
c07d424d 2731 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2732 NULL
2733};
2734
aef9ec39
RD
2735static struct scsi_host_template srp_template = {
2736 .module = THIS_MODULE,
b7f008fd
RD
2737 .name = "InfiniBand SRP initiator",
2738 .proc_name = DRV_NAME,
c9b03c1a 2739 .slave_configure = srp_slave_configure,
aef9ec39
RD
2740 .info = srp_target_info,
2741 .queuecommand = srp_queuecommand,
71444b97 2742 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2743 .eh_abort_handler = srp_abort,
2744 .eh_device_reset_handler = srp_reset_device,
2745 .eh_host_reset_handler = srp_reset_host,
2742c1da 2746 .skip_settle_delay = true,
49248644 2747 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2748 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2749 .this_id = -1,
4d73f95f 2750 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2751 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4
BVA
2752 .shost_attrs = srp_host_attrs,
2753 .use_blk_tags = 1,
c40ecc12 2754 .track_queue_depth = 1,
aef9ec39
RD
2755};
2756
34aa654e
BVA
2757static int srp_sdev_count(struct Scsi_Host *host)
2758{
2759 struct scsi_device *sdev;
2760 int c = 0;
2761
2762 shost_for_each_device(sdev, host)
2763 c++;
2764
2765 return c;
2766}
2767
aef9ec39
RD
2768static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2769{
3236822b
FT
2770 struct srp_rport_identifiers ids;
2771 struct srp_rport *rport;
2772
34aa654e 2773 target->state = SRP_TARGET_SCANNING;
aef9ec39
RD
2774 sprintf(target->target_name, "SRP.T10:%016llX",
2775 (unsigned long long) be64_to_cpu(target->id_ext));
2776
05321937 2777 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2778 return -ENODEV;
2779
3236822b
FT
2780 memcpy(ids.port_id, &target->id_ext, 8);
2781 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2782 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2783 rport = srp_rport_add(target->scsi_host, &ids);
2784 if (IS_ERR(rport)) {
2785 scsi_remove_host(target->scsi_host);
2786 return PTR_ERR(rport);
2787 }
2788
dc1bdbd9 2789 rport->lld_data = target;
9dd69a60 2790 target->rport = rport;
dc1bdbd9 2791
b3589fd4 2792 spin_lock(&host->target_lock);
aef9ec39 2793 list_add_tail(&target->list, &host->target_list);
b3589fd4 2794 spin_unlock(&host->target_lock);
aef9ec39 2795
aef9ec39 2796 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2797 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2798
34aa654e
BVA
2799 if (!target->connected || target->qp_in_error) {
2800 shost_printk(KERN_INFO, target->scsi_host,
2801 PFX "SCSI scan failed - removing SCSI host\n");
2802 srp_queue_remove_work(target);
2803 goto out;
2804 }
2805
2806 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2807 dev_name(&target->scsi_host->shost_gendev),
2808 srp_sdev_count(target->scsi_host));
2809
2810 spin_lock_irq(&target->lock);
2811 if (target->state == SRP_TARGET_SCANNING)
2812 target->state = SRP_TARGET_LIVE;
2813 spin_unlock_irq(&target->lock);
2814
2815out:
aef9ec39
RD
2816 return 0;
2817}
2818
ee959b00 2819static void srp_release_dev(struct device *dev)
aef9ec39
RD
2820{
2821 struct srp_host *host =
ee959b00 2822 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2823
2824 complete(&host->released);
2825}
2826
2827static struct class srp_class = {
2828 .name = "infiniband_srp",
ee959b00 2829 .dev_release = srp_release_dev
aef9ec39
RD
2830};
2831
96fc248a
BVA
2832/**
2833 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2834 * @host: SRP host.
2835 * @target: SRP target port.
96fc248a
BVA
2836 */
2837static bool srp_conn_unique(struct srp_host *host,
2838 struct srp_target_port *target)
2839{
2840 struct srp_target_port *t;
2841 bool ret = false;
2842
2843 if (target->state == SRP_TARGET_REMOVED)
2844 goto out;
2845
2846 ret = true;
2847
2848 spin_lock(&host->target_lock);
2849 list_for_each_entry(t, &host->target_list, list) {
2850 if (t != target &&
2851 target->id_ext == t->id_ext &&
2852 target->ioc_guid == t->ioc_guid &&
2853 target->initiator_ext == t->initiator_ext) {
2854 ret = false;
2855 break;
2856 }
2857 }
2858 spin_unlock(&host->target_lock);
2859
2860out:
2861 return ret;
2862}
2863
aef9ec39
RD
2864/*
2865 * Target ports are added by writing
2866 *
2867 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2868 * pkey=<P_Key>,service_id=<service ID>
2869 *
2870 * to the add_target sysfs attribute.
2871 */
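/*
 * For example (hypothetical identifiers; the host name depends on the HCA
 * and port, see srp_add_port() below):
 *
 *   echo id_ext=200100a0b80c6e2e,ioc_guid=00a0b80c6e2e,dgid=fe800000000000000002c90300a0b80c,pkey=ffff,service_id=200100a0b80c6e2e > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */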
2872enum {
2873 SRP_OPT_ERR = 0,
2874 SRP_OPT_ID_EXT = 1 << 0,
2875 SRP_OPT_IOC_GUID = 1 << 1,
2876 SRP_OPT_DGID = 1 << 2,
2877 SRP_OPT_PKEY = 1 << 3,
2878 SRP_OPT_SERVICE_ID = 1 << 4,
2879 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2880 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2881 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2882 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2883 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2884 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2885 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2886 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2887 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2888 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2889 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2890 SRP_OPT_IOC_GUID |
2891 SRP_OPT_DGID |
2892 SRP_OPT_PKEY |
2893 SRP_OPT_SERVICE_ID),
2894};
2895
a447c093 2896static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2897 { SRP_OPT_ID_EXT, "id_ext=%s" },
2898 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2899 { SRP_OPT_DGID, "dgid=%s" },
2900 { SRP_OPT_PKEY, "pkey=%x" },
2901 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2902 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2903 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2904 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2905 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2906 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2907 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2908 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2909 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2910 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2911 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2912 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2913};
2914
2915static int srp_parse_options(const char *buf, struct srp_target_port *target)
2916{
2917 char *options, *sep_opt;
2918 char *p;
2919 char dgid[3];
2920 substring_t args[MAX_OPT_ARGS];
2921 int opt_mask = 0;
2922 int token;
2923 int ret = -EINVAL;
2924 int i;
2925
2926 options = kstrdup(buf, GFP_KERNEL);
2927 if (!options)
2928 return -ENOMEM;
2929
2930 sep_opt = options;
7dcf9c19 2931 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2932 if (!*p)
2933 continue;
2934
2935 token = match_token(p, srp_opt_tokens, args);
2936 opt_mask |= token;
2937
2938 switch (token) {
2939 case SRP_OPT_ID_EXT:
2940 p = match_strdup(args);
a20f3a6d
IR
2941 if (!p) {
2942 ret = -ENOMEM;
2943 goto out;
2944 }
aef9ec39
RD
2945 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2946 kfree(p);
2947 break;
2948
2949 case SRP_OPT_IOC_GUID:
2950 p = match_strdup(args);
a20f3a6d
IR
2951 if (!p) {
2952 ret = -ENOMEM;
2953 goto out;
2954 }
aef9ec39
RD
2955 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2956 kfree(p);
2957 break;
2958
2959 case SRP_OPT_DGID:
2960 p = match_strdup(args);
a20f3a6d
IR
2961 if (!p) {
2962 ret = -ENOMEM;
2963 goto out;
2964 }
aef9ec39 2965 if (strlen(p) != 32) {
e0bda7d8 2966 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 2967 kfree(p);
aef9ec39
RD
2968 goto out;
2969 }
2970
2971 for (i = 0; i < 16; ++i) {
747fe000
BVA
2972 strlcpy(dgid, p + i * 2, sizeof(dgid));
2973 if (sscanf(dgid, "%hhx",
2974 &target->orig_dgid.raw[i]) < 1) {
2975 ret = -EINVAL;
2976 kfree(p);
2977 goto out;
2978 }
aef9ec39 2979 }
bf17c1c7 2980 kfree(p);
aef9ec39
RD
2981 break;
2982
2983 case SRP_OPT_PKEY:
2984 if (match_hex(args, &token)) {
e0bda7d8 2985 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
2986 goto out;
2987 }
747fe000 2988 target->pkey = cpu_to_be16(token);
aef9ec39
RD
2989 break;
2990
2991 case SRP_OPT_SERVICE_ID:
2992 p = match_strdup(args);
a20f3a6d
IR
2993 if (!p) {
2994 ret = -ENOMEM;
2995 goto out;
2996 }
aef9ec39
RD
2997 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2998 kfree(p);
2999 break;
3000
3001 case SRP_OPT_MAX_SECT:
3002 if (match_int(args, &token)) {
e0bda7d8 3003 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3004 goto out;
3005 }
3006 target->scsi_host->max_sectors = token;
3007 break;
3008
4d73f95f
BVA
3009 case SRP_OPT_QUEUE_SIZE:
3010 if (match_int(args, &token) || token < 1) {
3011 pr_warn("bad queue_size parameter '%s'\n", p);
3012 goto out;
3013 }
3014 target->scsi_host->can_queue = token;
3015 target->queue_size = token + SRP_RSP_SQ_SIZE +
3016 SRP_TSK_MGMT_SQ_SIZE;
3017 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3018 target->scsi_host->cmd_per_lun = token;
3019 break;
3020
52fb2b50 3021 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3022 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3023 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3024 p);
52fb2b50
VP
3025 goto out;
3026 }
4d73f95f 3027 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3028 break;
3029
0c0450db
R
3030 case SRP_OPT_IO_CLASS:
3031 if (match_hex(args, &token)) {
e0bda7d8 3032 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3033 goto out;
3034 }
3035 if (token != SRP_REV10_IB_IO_CLASS &&
3036 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3037 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3038 token, SRP_REV10_IB_IO_CLASS,
3039 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3040 goto out;
3041 }
3042 target->io_class = token;
3043 break;
3044
01cb9bcb
IR
3045 case SRP_OPT_INITIATOR_EXT:
3046 p = match_strdup(args);
a20f3a6d
IR
3047 if (!p) {
3048 ret = -ENOMEM;
3049 goto out;
3050 }
01cb9bcb
IR
3051 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3052 kfree(p);
3053 break;
3054
49248644
DD
3055 case SRP_OPT_CMD_SG_ENTRIES:
3056 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3057 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3058 p);
49248644
DD
3059 goto out;
3060 }
3061 target->cmd_sg_cnt = token;
3062 break;
3063
c07d424d
DD
3064 case SRP_OPT_ALLOW_EXT_SG:
3065 if (match_int(args, &token)) {
e0bda7d8 3066 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3067 goto out;
3068 }
3069 target->allow_ext_sg = !!token;
3070 break;
3071
3072 case SRP_OPT_SG_TABLESIZE:
3073 if (match_int(args, &token) || token < 1 ||
3074 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3075 pr_warn("bad max sg_tablesize parameter '%s'\n",
3076 p);
c07d424d
DD
3077 goto out;
3078 }
3079 target->sg_tablesize = token;
3080 break;
3081
4b5e5f41
BVA
3082 case SRP_OPT_COMP_VECTOR:
3083 if (match_int(args, &token) || token < 0) {
3084 pr_warn("bad comp_vector parameter '%s'\n", p);
3085 goto out;
3086 }
3087 target->comp_vector = token;
3088 break;
3089
7bb312e4
VP
3090 case SRP_OPT_TL_RETRY_COUNT:
3091 if (match_int(args, &token) || token < 2 || token > 7) {
3092 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3093 p);
3094 goto out;
3095 }
3096 target->tl_retry_count = token;
3097 break;
3098
aef9ec39 3099 default:
e0bda7d8
BVA
3100 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3101 p);
aef9ec39
RD
3102 goto out;
3103 }
3104 }
3105
3106 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3107 ret = 0;
3108 else
3109 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3110 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3111 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3112 pr_warn("target creation request is missing parameter '%s'\n",
3113 srp_opt_tokens[i].pattern);
aef9ec39 3114
4d73f95f
BVA
3115 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3116 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3117 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3118 target->scsi_host->cmd_per_lun,
3119 target->scsi_host->can_queue);
3120
aef9ec39
RD
3121out:
3122 kfree(options);
3123 return ret;
3124}
3125
ee959b00
TJ
3126static ssize_t srp_create_target(struct device *dev,
3127 struct device_attribute *attr,
aef9ec39
RD
3128 const char *buf, size_t count)
3129{
3130 struct srp_host *host =
ee959b00 3131 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3132 struct Scsi_Host *target_host;
3133 struct srp_target_port *target;
509c07bc 3134 struct srp_rdma_ch *ch;
d1b4289e
BVA
3135 struct srp_device *srp_dev = host->srp_dev;
3136 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3137 int ret, node_idx, node, cpu, i;
3138 bool multich = false;
aef9ec39
RD
3139
3140 target_host = scsi_host_alloc(&srp_template,
3141 sizeof (struct srp_target_port));
3142 if (!target_host)
3143 return -ENOMEM;
3144
49248644 3145 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3146 target_host->max_channel = 0;
3147 target_host->max_id = 1;
3c8edf0e
AR
3148 target_host->max_lun = SRP_MAX_LUN;
3149 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3150
aef9ec39 3151 target = host_to_target(target_host);
aef9ec39 3152
49248644
DD
3153 target->io_class = SRP_REV16A_IB_IO_CLASS;
3154 target->scsi_host = target_host;
3155 target->srp_host = host;
3156 target->lkey = host->srp_dev->mr->lkey;
3157 target->rkey = host->srp_dev->mr->rkey;
3158 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3159 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3160 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3161 target->tl_retry_count = 7;
4d73f95f 3162 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3163
34aa654e
BVA
3164 /*
3165 * Prevent the SCSI host from being removed by srp_remove_target()
3166 * before this function returns.
3167 */
3168 scsi_host_get(target->scsi_host);
3169
2d7091bc
BVA
3170 mutex_lock(&host->add_target_mutex);
3171
aef9ec39
RD
3172 ret = srp_parse_options(buf, target);
3173 if (ret)
3174 goto err;
3175
77f2c1a4
BVA
3176 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3177 if (ret)
3178 goto err;
3179
4d73f95f
BVA
3180 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3181
96fc248a
BVA
3182 if (!srp_conn_unique(target->srp_host, target)) {
3183 shost_printk(KERN_INFO, target->scsi_host,
3184 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3185 be64_to_cpu(target->id_ext),
3186 be64_to_cpu(target->ioc_guid),
3187 be64_to_cpu(target->initiator_ext));
3188 ret = -EEXIST;
3189 goto err;
3190 }
3191
5cfb1782 3192 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3193 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3194 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3195 target->sg_tablesize = target->cmd_sg_cnt;
3196 }
3197
3198 target_host->sg_tablesize = target->sg_tablesize;
3199 target->indirect_size = target->sg_tablesize *
3200 sizeof (struct srp_direct_buf);
49248644
DD
3201 target->max_iu_len = sizeof (struct srp_cmd) +
3202 sizeof (struct srp_indirect_buf) +
3203 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3204
c1120f89 3205 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3206 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3207 spin_lock_init(&target->lock);
747fe000 3208 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66 3209 if (ret)
d92c0da7 3210 goto err;
aef9ec39 3211
d92c0da7
BVA
3212 ret = -ENOMEM;
3213 target->ch_count = max_t(unsigned, num_online_nodes(),
3214 min(ch_count ? :
3215 min(4 * num_online_nodes(),
3216 ibdev->num_comp_vectors),
3217 num_online_cpus()));
3218 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3219 GFP_KERNEL);
3220 if (!target->ch)
3221 goto err;
aef9ec39 3222
d92c0da7
BVA
3223 node_idx = 0;
3224 for_each_online_node(node) {
3225 const int ch_start = (node_idx * target->ch_count /
3226 num_online_nodes());
3227 const int ch_end = ((node_idx + 1) * target->ch_count /
3228 num_online_nodes());
3229 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3230 num_online_nodes() + target->comp_vector)
3231 % ibdev->num_comp_vectors;
3232 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3233 num_online_nodes() + target->comp_vector)
3234 % ibdev->num_comp_vectors;
3235 int cpu_idx = 0;
3236
3237 for_each_online_cpu(cpu) {
3238 if (cpu_to_node(cpu) != node)
3239 continue;
3240 if (ch_start + cpu_idx >= ch_end)
3241 continue;
3242 ch = &target->ch[ch_start + cpu_idx];
3243 ch->target = target;
3244 ch->comp_vector = cv_start == cv_end ? cv_start :
3245 cv_start + cpu_idx % (cv_end - cv_start);
3246 spin_lock_init(&ch->lock);
3247 INIT_LIST_HEAD(&ch->free_tx);
3248 ret = srp_new_cm_id(ch);
3249 if (ret)
3250 goto err_disconnect;
aef9ec39 3251
d92c0da7
BVA
3252 ret = srp_create_ch_ib(ch);
3253 if (ret)
3254 goto err_disconnect;
3255
3256 ret = srp_alloc_req_data(ch);
3257 if (ret)
3258 goto err_disconnect;
3259
3260 ret = srp_connect_ch(ch, multich);
3261 if (ret) {
3262 shost_printk(KERN_ERR, target->scsi_host,
3263 PFX "Connection %d/%d failed\n",
3264 ch_start + cpu_idx,
3265 target->ch_count);
3266 if (node_idx == 0 && cpu_idx == 0) {
3267 goto err_disconnect;
3268 } else {
3269 srp_free_ch_ib(target, ch);
3270 srp_free_req_data(target, ch);
3271 target->ch_count = ch - target->ch;
3272 break;
3273 }
3274 }
3275
3276 multich = true;
3277 cpu_idx++;
3278 }
3279 node_idx++;
aef9ec39
RD
3280 }
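/*
 * Example of the spreading above (illustrative numbers): with two online
 * NUMA nodes, ch_count = 4 and an 8-vector HCA, node 0 owns channels 0-1
 * with completion vectors drawn from 0-3, and node 1 owns channels 2-3
 * with vectors drawn from 4-7 (both offset by target->comp_vector).
 */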
3281
d92c0da7
BVA
3282 target->scsi_host->nr_hw_queues = target->ch_count;
3283
aef9ec39
RD
3284 ret = srp_add_target(host, target);
3285 if (ret)
3286 goto err_disconnect;
3287
34aa654e
BVA
3288 if (target->state != SRP_TARGET_REMOVED) {
3289 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3290 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3291 be64_to_cpu(target->id_ext),
3292 be64_to_cpu(target->ioc_guid),
747fe000 3293 be16_to_cpu(target->pkey),
34aa654e 3294 be64_to_cpu(target->service_id),
747fe000 3295 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3296 }
e7ffde01 3297
2d7091bc
BVA
3298 ret = count;
3299
3300out:
3301 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3302
3303 scsi_host_put(target->scsi_host);
3304
2d7091bc 3305 return ret;
aef9ec39
RD
3306
3307err_disconnect:
3308 srp_disconnect_target(target);
3309
d92c0da7
BVA
3310 for (i = 0; i < target->ch_count; i++) {
3311 ch = &target->ch[i];
3312 srp_free_ch_ib(target, ch);
3313 srp_free_req_data(target, ch);
3314 }
aef9ec39 3315
d92c0da7 3316 kfree(target->ch);
8f26c9ff 3317
aef9ec39
RD
3318err:
3319 scsi_host_put(target_host);
2d7091bc 3320 goto out;
aef9ec39
RD
3321}
3322
ee959b00 3323static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3324
ee959b00
TJ
3325static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3326 char *buf)
aef9ec39 3327{
ee959b00 3328 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3329
05321937 3330 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3331}
3332
ee959b00 3333static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3334
ee959b00
TJ
3335static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3336 char *buf)
aef9ec39 3337{
ee959b00 3338 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3339
3340 return sprintf(buf, "%d\n", host->port);
3341}
3342
ee959b00 3343static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3344
f5358a17 3345static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3346{
3347 struct srp_host *host;
3348
3349 host = kzalloc(sizeof *host, GFP_KERNEL);
3350 if (!host)
3351 return NULL;
3352
3353 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3354 spin_lock_init(&host->target_lock);
aef9ec39 3355 init_completion(&host->released);
2d7091bc 3356 mutex_init(&host->add_target_mutex);
05321937 3357 host->srp_dev = device;
aef9ec39
RD
3358 host->port = port;
3359
ee959b00
TJ
3360 host->dev.class = &srp_class;
3361 host->dev.parent = device->dev->dma_device;
d927e38c 3362 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3363
ee959b00 3364 if (device_register(&host->dev))
f5358a17 3365 goto free_host;
ee959b00 3366 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3367 goto err_class;
ee959b00 3368 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3369 goto err_class;
ee959b00 3370 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3371 goto err_class;
3372
3373 return host;
3374
3375err_class:
ee959b00 3376 device_unregister(&host->dev);
aef9ec39 3377
f5358a17 3378free_host:
aef9ec39
RD
3379 kfree(host);
3380
3381 return NULL;
3382}
3383
3384static void srp_add_one(struct ib_device *device)
3385{
f5358a17
RD
3386 struct srp_device *srp_dev;
3387 struct ib_device_attr *dev_attr;
aef9ec39 3388 struct srp_host *host;
52ede08f
BVA
3389 int mr_page_shift, s, e, p;
3390 u64 max_pages_per_mr;
aef9ec39 3391
f5358a17
RD
3392 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3393 if (!dev_attr)
cf311cd4 3394 return;
aef9ec39 3395
f5358a17 3396 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3397 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3398 goto free_attr;
3399 }
3400
3401 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3402 if (!srp_dev)
3403 goto free_attr;
3404
d1b4289e
BVA
3405 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3406 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3407 srp_dev->has_fr = (dev_attr->device_cap_flags &
3408 IB_DEVICE_MEM_MGT_EXTENSIONS);
3409 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3410 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3411
3412 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3413 (!srp_dev->has_fmr || prefer_fr));
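	/*
	 * Registration strategy: fast registration (FR) is chosen when the
	 * HCA supports it and either FMR is unavailable or the prefer_fr
	 * module parameter is set; otherwise FMR is used.
	 */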
d1b4289e 3414
f5358a17
RD
3415 /*
3416 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3417 * minimum of 4096 bytes. We're unlikely to build large sglists
3418 * out of smaller entries.
f5358a17 3419 */
52ede08f
BVA
3420 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3421 srp_dev->mr_page_size = 1 << mr_page_shift;
3422 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3423 max_pages_per_mr = dev_attr->max_mr_size;
3424 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3425 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3426 max_pages_per_mr);
5cfb1782
BVA
3427 if (srp_dev->use_fast_reg) {
3428 srp_dev->max_pages_per_mr =
3429 min_t(u32, srp_dev->max_pages_per_mr,
3430 dev_attr->max_fast_reg_page_list_len);
3431 }
52ede08f
BVA
3432 srp_dev->mr_max_size = srp_dev->mr_page_size *
3433 srp_dev->max_pages_per_mr;
5cfb1782 3434 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3435 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3436 dev_attr->max_fast_reg_page_list_len,
52ede08f 3437 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
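	/*
	 * Worked example with a hypothetical HCA: page_size_cap ==
	 * 0x000ff000 advertises 4 KiB through 512 KiB pages, so
	 * ffs() - 1 == 12 and mr_page_shift = max(12, 12) = 12, giving
	 * mr_page_size = 4096 and mr_page_mask = ~0xfffULL. With
	 * max_mr_size == 2 MiB, max_pages_per_mr becomes
	 * min(SRP_MAX_PAGES_PER_MR, 2 MiB / 4 KiB) = min(..., 512).
	 */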
f5358a17
RD
3438
3439 INIT_LIST_HEAD(&srp_dev->dev_list);
3440
3441 srp_dev->dev = device;
3442 srp_dev->pd = ib_alloc_pd(device);
3443 if (IS_ERR(srp_dev->pd))
3444 goto free_dev;
3445
3446 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3447 IB_ACCESS_LOCAL_WRITE |
3448 IB_ACCESS_REMOTE_READ |
3449 IB_ACCESS_REMOTE_WRITE);
3450 if (IS_ERR(srp_dev->mr))
3451 goto err_pd;
3452
07ebafba 3453 if (device->node_type == RDMA_NODE_IB_SWITCH) {
aef9ec39
RD
3454 s = 0;
3455 e = 0;
3456 } else {
3457 s = 1;
3458 e = device->phys_port_cnt;
3459 }
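	/*
	 * An IB switch exposes a single management port numbered 0,
	 * whereas an HCA numbers its ports 1..phys_port_cnt; the loop
	 * below walks whichever range applies.
	 */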
3460
3461 for (p = s; p <= e; ++p) {
f5358a17 3462 host = srp_add_port(srp_dev, p);
aef9ec39 3463 if (host)
f5358a17 3464 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3465 }
3466
f5358a17
RD
3467 ib_set_client_data(device, &srp_client, srp_dev);
3468
3469 goto free_attr;
3470
3471err_pd:
3472 ib_dealloc_pd(srp_dev->pd);
3473
3474free_dev:
3475 kfree(srp_dev);
3476
3477free_attr:
3478 kfree(dev_attr);
aef9ec39
RD
3479}
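/*
 * srp_add_one() runs once per IB device: one protection domain and one
 * global DMA MR are allocated up front and then shared by every target
 * subsequently created on any of the device's ports.
 */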
3480
3481static void srp_remove_one(struct ib_device *device)
3482{
f5358a17 3483 struct srp_device *srp_dev;
aef9ec39 3484 struct srp_host *host, *tmp_host;
ef6c49d8 3485 struct srp_target_port *target;
aef9ec39 3486
f5358a17 3487 srp_dev = ib_get_client_data(device, &srp_client);
1fe0cb84
DB
3488 if (!srp_dev)
3489 return;
aef9ec39 3490
f5358a17 3491 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3492 device_unregister(&host->dev);
aef9ec39
RD
3493 /*
3494 * Wait for the sysfs entry to go away, so that no new
3495 * target ports can be created.
3496 */
3497 wait_for_completion(&host->released);
3498
3499 /*
ef6c49d8 3500 * Queue removal work for all target ports.
aef9ec39 3501 */
b3589fd4 3502 spin_lock(&host->target_lock);
ef6c49d8
BVA
3503 list_for_each_entry(target, &host->target_list, list)
3504 srp_queue_remove_work(target);
b3589fd4 3505 spin_unlock(&host->target_lock);
aef9ec39
RD
3506
3507 /*
bcc05910 3508 * Wait for outstanding tl_err work and the queued target port removal tasks.
aef9ec39 3509 */
ef6c49d8 3510 flush_workqueue(system_long_wq);
bcc05910 3511 flush_workqueue(srp_remove_wq);
aef9ec39 3512
aef9ec39
RD
3513 kfree(host);
3514 }
3515
f5358a17
RD
3516 ib_dereg_mr(srp_dev->mr);
3517 ib_dealloc_pd(srp_dev->pd);
3518
3519 kfree(srp_dev);
aef9ec39
RD
3520}
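/*
 * The teardown order above is deliberate: the sysfs entry goes away first
 * so no new targets can be added, removal work is queued for each existing
 * target under target_lock, and both workqueues are flushed before the
 * host memory is freed.
 */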
3521
3236822b 3522static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3523 .has_rport_state = true,
3524 .reset_timer_if_blocked = true,
a95cadb9 3525 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3526 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3527 .dev_loss_tmo = &srp_dev_loss_tmo,
3528 .reconnect = srp_rport_reconnect,
dc1bdbd9 3529 .rport_delete = srp_rport_delete,
ed9b2264 3530 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3531};
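/*
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo are passed as
 * pointers to the module-level variables, so the SRP transport class
 * always sees the current ib_srp module parameter values.
 */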
3532
aef9ec39
RD
3533static int __init srp_init_module(void)
3534{
3535 int ret;
3536
dcb4cb85 3537 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3538
49248644 3539 if (srp_sg_tablesize) {
e0bda7d8 3540 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3541 if (!cmd_sg_entries)
3542 cmd_sg_entries = srp_sg_tablesize;
3543 }
3544
3545 if (!cmd_sg_entries)
3546 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3547
3548 if (cmd_sg_entries > 255) {
e0bda7d8 3549 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3550 cmd_sg_entries = 255;
1e89a194
DD
3551 }
3552
c07d424d
DD
3553 if (!indirect_sg_entries)
3554 indirect_sg_entries = cmd_sg_entries;
3555 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3556 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3557 cmd_sg_entries);
c07d424d
DD
3558 indirect_sg_entries = cmd_sg_entries;
3559 }
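	/*
	 * The 255 clamp above reflects the SRP_CMD wire format, which
	 * carries the scatter/gather descriptor counts in single-byte
	 * fields; indirect_sg_entries is additionally kept >=
	 * cmd_sg_entries because the indirect table must be able to
	 * describe at least as many entries as a command can carry.
	 */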
3560
bcc05910 3561 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3562 if (!srp_remove_wq) {
3563 ret = -ENOMEM;
bcc05910
BVA
3564 goto out;
3565 }
3566
3567 ret = -ENOMEM;
3236822b
FT
3568 ib_srp_transport_template =
3569 srp_attach_transport(&ib_srp_transport_functions);
3570 if (!ib_srp_transport_template)
bcc05910 3571 goto destroy_wq;
3236822b 3572
aef9ec39
RD
3573 ret = class_register(&srp_class);
3574 if (ret) {
e0bda7d8 3575 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3576 goto release_tr;
aef9ec39
RD
3577 }
3578
c1a0b23b
MT
3579 ib_sa_register_client(&srp_sa_client);
3580
aef9ec39
RD
3581 ret = ib_register_client(&srp_client);
3582 if (ret) {
e0bda7d8 3583 pr_err("couldn't register IB client\n");
bcc05910 3584 goto unreg_sa;
aef9ec39
RD
3585 }
3586
bcc05910
BVA
3587out:
3588 return ret;
3589
3590unreg_sa:
3591 ib_sa_unregister_client(&srp_sa_client);
3592 class_unregister(&srp_class);
3593
3594release_tr:
3595 srp_release_transport(ib_srp_transport_template);
3596
3597destroy_wq:
3598 destroy_workqueue(srp_remove_wq);
3599 goto out;
aef9ec39
RD
3600}
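/*
 * The goto chain above unwinds in exact reverse order of setup and
 * mirrors srp_cleanup_module() below: the SA client and class are
 * unregistered before the transport template is released and the
 * workqueue destroyed.
 */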
3601
3602static void __exit srp_cleanup_module(void)
3603{
3604 ib_unregister_client(&srp_client);
c1a0b23b 3605 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3606 class_unregister(&srp_class);
3236822b 3607 srp_release_transport(ib_srp_transport_template);
bcc05910 3608 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3609}
3610
3611module_init(srp_init_module);
3612module_exit(srp_cleanup_module);