drivers/infiniband/ulp/srp/ib_srp.c
(mirror_ubuntu-bionic-kernel.git, blame at commit "IB/srp: Register the indirect data buffer descriptor")
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

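/*
 * Illustrative example (values chosen arbitrarily): the timeout parameters
 * above can be set at module load time, e.g.
 *
 *	modprobe ib_srp reconnect_delay=10 fast_io_fail_tmo=5 dev_loss_tmo=60
 *
 * Because they are registered with module_param_cb() and the srp_tmo_ops
 * get/set handlers defined below, they can also be read and changed at
 * runtime through /sys/module/ib_srp/parameters/.
 */
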
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

aef9ec39
RD
189static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
190{
191 return (struct srp_target_port *) host->hostdata;
192}
193
194static const char *srp_target_info(struct Scsi_Host *host)
195{
196 return host_to_target(host)->target_name;
197}
198
5d7cbfd6
RD
199static int srp_target_is_topspin(struct srp_target_port *target)
200{
201 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 202 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
203
204 return topspin_workarounds &&
3d1ff48d
RK
205 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
206 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
207}
208
aef9ec39
RD
209static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
210 gfp_t gfp_mask,
211 enum dma_data_direction direction)
212{
213 struct srp_iu *iu;
214
215 iu = kmalloc(sizeof *iu, gfp_mask);
216 if (!iu)
217 goto out;
218
219 iu->buf = kzalloc(size, gfp_mask);
220 if (!iu->buf)
221 goto out_free_iu;
222
05321937
GKH
223 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
224 direction);
225 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
226 goto out_free_buf;
227
228 iu->size = size;
229 iu->direction = direction;
230
231 return iu;
232
233out_free_buf:
234 kfree(iu->buf);
235out_free_iu:
236 kfree(iu);
237out:
238 return NULL;
239}
240
241static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
242{
243 if (!iu)
244 return;
245
05321937
GKH
246 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
247 iu->direction);
aef9ec39
RD
248 kfree(iu->buf);
249 kfree(iu);
250}
251
252static void srp_qp_event(struct ib_event *event, void *context)
253{
57363d98
SG
254 pr_debug("QP event %s (%d)\n",
255 ib_event_msg(event->event), event->event);
aef9ec39
RD
256}
257
258static int srp_init_qp(struct srp_target_port *target,
259 struct ib_qp *qp)
260{
261 struct ib_qp_attr *attr;
262 int ret;
263
264 attr = kmalloc(sizeof *attr, GFP_KERNEL);
265 if (!attr)
266 return -ENOMEM;
267
56b5390c
BVA
268 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
269 target->srp_host->port,
270 be16_to_cpu(target->pkey),
271 &attr->pkey_index);
aef9ec39
RD
272 if (ret)
273 goto out;
274
275 attr->qp_state = IB_QPS_INIT;
276 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
277 IB_ACCESS_REMOTE_WRITE);
278 attr->port_num = target->srp_host->port;
279
280 ret = ib_modify_qp(qp, attr,
281 IB_QP_STATE |
282 IB_QP_PKEY_INDEX |
283 IB_QP_ACCESS_FLAGS |
284 IB_QP_PORT);
285
286out:
287 kfree(attr);
288 return ret;
289}
290
509c07bc 291static int srp_new_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 292{
509c07bc 293 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
294 struct ib_cm_id *new_cm_id;
295
05321937 296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
509c07bc 297 srp_cm_handler, ch);
9fe4bcf4
DD
298 if (IS_ERR(new_cm_id))
299 return PTR_ERR(new_cm_id);
300
509c07bc
BVA
301 if (ch->cm_id)
302 ib_destroy_cm_id(ch->cm_id);
303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
9fe4bcf4
DD
308
309 return 0;
310}
311
d1b4289e
BVA
312static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
313{
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
316
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
320 fmr_param.cache = 1;
52ede08f
BVA
321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
326
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
328}
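
/*
 * Note: only one of the two registration pools is used per channel. Whether
 * the FMR pool above or the fast registration (FR) pool below gets allocated
 * is decided by dev->use_fmr vs. dev->use_fast_reg in srp_create_ch_ib()
 * (cf. the prefer_fr module parameter).
 */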
329
5cfb1782
BVA
330/**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335{
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
343 if (d->frpl)
344 ib_free_fast_reg_page_list(d->frpl);
345 if (d->mr)
346 ib_dereg_mr(d->mr);
347 }
348 kfree(pool);
349}
350
351/**
352 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
353 * @device: IB device to allocate fast registration descriptors for.
354 * @pd: Protection domain associated with the FR descriptors.
355 * @pool_size: Number of descriptors to allocate.
356 * @max_page_list_len: Maximum fast registration work request page list length.
357 */
358static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
359 struct ib_pd *pd, int pool_size,
360 int max_page_list_len)
361{
362 struct srp_fr_pool *pool;
363 struct srp_fr_desc *d;
364 struct ib_mr *mr;
365 struct ib_fast_reg_page_list *frpl;
366 int i, ret = -EINVAL;
367
368 if (pool_size <= 0)
369 goto err;
370 ret = -ENOMEM;
371 pool = kzalloc(sizeof(struct srp_fr_pool) +
372 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
373 if (!pool)
374 goto err;
375 pool->size = pool_size;
376 pool->max_page_list_len = max_page_list_len;
377 spin_lock_init(&pool->lock);
378 INIT_LIST_HEAD(&pool->free_list);
379
380 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
563b67c5
SG
381 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
382 max_page_list_len);
5cfb1782
BVA
383 if (IS_ERR(mr)) {
384 ret = PTR_ERR(mr);
385 goto destroy_pool;
386 }
387 d->mr = mr;
388 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
389 if (IS_ERR(frpl)) {
390 ret = PTR_ERR(frpl);
391 goto destroy_pool;
392 }
393 d->frpl = frpl;
394 list_add_tail(&d->entry, &pool->free_list);
395 }
396
397out:
398 return pool;
399
400destroy_pool:
401 srp_destroy_fr_pool(pool);
402
403err:
404 pool = ERR_PTR(ret);
405 goto out;
406}
407
408/**
409 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
410 * @pool: Pool to obtain descriptor from.
411 */
412static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
413{
414 struct srp_fr_desc *d = NULL;
415 unsigned long flags;
416
417 spin_lock_irqsave(&pool->lock, flags);
418 if (!list_empty(&pool->free_list)) {
419 d = list_first_entry(&pool->free_list, typeof(*d), entry);
420 list_del(&d->entry);
421 }
422 spin_unlock_irqrestore(&pool->lock, flags);
423
424 return d;
425}
426
427/**
428 * srp_fr_pool_put() - put an FR descriptor back in the free list
429 * @pool: Pool the descriptor was allocated from.
430 * @desc: Pointer to an array of fast registration descriptor pointers.
431 * @n: Number of descriptors to put back.
432 *
433 * Note: The caller must already have queued an invalidation request for
434 * desc->mr->rkey before calling this function.
435 */
436static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
437 int n)
438{
439 unsigned long flags;
440 int i;
441
442 spin_lock_irqsave(&pool->lock, flags);
443 for (i = 0; i < n; i++)
444 list_add(&desc[i]->entry, &pool->free_list);
445 spin_unlock_irqrestore(&pool->lock, flags);
446}
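
/*
 * Rough sketch of an FR descriptor's life cycle (see srp_map_finish_fr() and
 * srp_unmap_data() further down):
 *
 *	desc = srp_fr_pool_get(ch->fr_pool);
 *	... post an IB_WR_FAST_REG_MR work request that uses desc->mr ...
 *	... command completes ...
 *	srp_inv_rkey(ch, desc->mr->rkey);	 // queue the invalidation
 *	srp_fr_pool_put(ch->fr_pool, &desc, 1); // return it to the pool
 */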
447
448static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
449{
450 struct srp_device *dev = target->srp_host->srp_dev;
451
452 return srp_create_fr_pool(dev->dev, dev->pd,
453 target->scsi_host->can_queue,
454 dev->max_pages_per_mr);
455}
456
7dad6b2e
BVA
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
466static void srp_destroy_qp(struct srp_rdma_ch *ch)
467{
7dad6b2e
BVA
468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
471 int ret;
472
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
c014c8cd 474 WARN_ON_ONCE(ch->connected);
7dad6b2e
BVA
475
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
478 if (ret)
479 goto out;
480
481 init_completion(&ch->done);
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
483 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
484 if (ret == 0)
485 wait_for_completion(&ch->done);
486
487out:
488 ib_destroy_qp(ch->qp);
489}
490
509c07bc 491static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 492{
509c07bc 493 struct srp_target_port *target = ch->target;
62154b2e 494 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 495 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
496 struct ib_cq *recv_cq, *send_cq;
497 struct ib_qp *qp;
d1b4289e 498 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782
BVA
499 struct srp_fr_pool *fr_pool = NULL;
500 const int m = 1 + dev->use_fast_reg;
8e37210b 501 struct ib_cq_init_attr cq_attr = {};
aef9ec39
RD
502 int ret;
503
504 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
505 if (!init_attr)
506 return -ENOMEM;
507
7dad6b2e 508 /* + 1 for SRP_LAST_WR_ID */
8e37210b
MB
509 cq_attr.cqe = target->queue_size + 1;
510 cq_attr.comp_vector = ch->comp_vector;
509c07bc 511 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
8e37210b 512 &cq_attr);
73aa89ed
IR
513 if (IS_ERR(recv_cq)) {
514 ret = PTR_ERR(recv_cq);
da9d2f07 515 goto err;
aef9ec39
RD
516 }
517
8e37210b
MB
518 cq_attr.cqe = m * target->queue_size;
519 cq_attr.comp_vector = ch->comp_vector;
509c07bc 520 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
8e37210b 521 &cq_attr);
73aa89ed
IR
522 if (IS_ERR(send_cq)) {
523 ret = PTR_ERR(send_cq);
da9d2f07 524 goto err_recv_cq;
9c03dc9f
BVA
525 }
526
73aa89ed 527 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
528
529 init_attr->event_handler = srp_qp_event;
5cfb1782 530 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 531 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
532 init_attr->cap.max_recv_sge = 1;
533 init_attr->cap.max_send_sge = 1;
5cfb1782 534 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 535 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
536 init_attr->send_cq = send_cq;
537 init_attr->recv_cq = recv_cq;
aef9ec39 538
62154b2e 539 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
540 if (IS_ERR(qp)) {
541 ret = PTR_ERR(qp);
da9d2f07 542 goto err_send_cq;
aef9ec39
RD
543 }
544
73aa89ed 545 ret = srp_init_qp(target, qp);
da9d2f07
RD
546 if (ret)
547 goto err_qp;
aef9ec39 548
002f1567 549 if (dev->use_fast_reg) {
5cfb1782
BVA
550 fr_pool = srp_alloc_fr_pool(target);
551 if (IS_ERR(fr_pool)) {
552 ret = PTR_ERR(fr_pool);
553 shost_printk(KERN_WARNING, target->scsi_host, PFX
554 "FR pool allocation failed (%d)\n", ret);
555 goto err_qp;
556 }
509c07bc
BVA
557 if (ch->fr_pool)
558 srp_destroy_fr_pool(ch->fr_pool);
559 ch->fr_pool = fr_pool;
002f1567 560 } else if (dev->use_fmr) {
d1b4289e
BVA
561 fmr_pool = srp_alloc_fmr_pool(target);
562 if (IS_ERR(fmr_pool)) {
563 ret = PTR_ERR(fmr_pool);
564 shost_printk(KERN_WARNING, target->scsi_host, PFX
565 "FMR pool allocation failed (%d)\n", ret);
566 goto err_qp;
567 }
509c07bc
BVA
568 if (ch->fmr_pool)
569 ib_destroy_fmr_pool(ch->fmr_pool);
570 ch->fmr_pool = fmr_pool;
d1b4289e
BVA
571 }
572
509c07bc 573 if (ch->qp)
7dad6b2e 574 srp_destroy_qp(ch);
509c07bc
BVA
575 if (ch->recv_cq)
576 ib_destroy_cq(ch->recv_cq);
577 if (ch->send_cq)
578 ib_destroy_cq(ch->send_cq);
73aa89ed 579
509c07bc
BVA
580 ch->qp = qp;
581 ch->recv_cq = recv_cq;
582 ch->send_cq = send_cq;
73aa89ed 583
da9d2f07
RD
584 kfree(init_attr);
585 return 0;
586
587err_qp:
73aa89ed 588 ib_destroy_qp(qp);
da9d2f07
RD
589
590err_send_cq:
73aa89ed 591 ib_destroy_cq(send_cq);
da9d2f07
RD
592
593err_recv_cq:
73aa89ed 594 ib_destroy_cq(recv_cq);
da9d2f07
RD
595
596err:
aef9ec39
RD
597 kfree(init_attr);
598 return ret;
599}
600
4d73f95f
BVA
601/*
602 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 603 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 604 */
509c07bc
BVA
605static void srp_free_ch_ib(struct srp_target_port *target,
606 struct srp_rdma_ch *ch)
aef9ec39 607{
5cfb1782 608 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
609 int i;
610
d92c0da7
BVA
611 if (!ch->target)
612 return;
613
509c07bc
BVA
614 if (ch->cm_id) {
615 ib_destroy_cm_id(ch->cm_id);
616 ch->cm_id = NULL;
394c595e
BVA
617 }
618
d92c0da7
BVA
619 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
620 if (!ch->qp)
621 return;
622
5cfb1782 623 if (dev->use_fast_reg) {
509c07bc
BVA
624 if (ch->fr_pool)
625 srp_destroy_fr_pool(ch->fr_pool);
002f1567 626 } else if (dev->use_fmr) {
509c07bc
BVA
627 if (ch->fmr_pool)
628 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 629 }
7dad6b2e 630 srp_destroy_qp(ch);
509c07bc
BVA
631 ib_destroy_cq(ch->send_cq);
632 ib_destroy_cq(ch->recv_cq);
aef9ec39 633
d92c0da7
BVA
	/*
	 * Prevent the SCSI error handler from using this channel after it has
	 * been freed: the error handler may keep trying to perform recovery
	 * actions after scsi_remove_host() has returned.
	 */
640 ch->target = NULL;
641
509c07bc
BVA
642 ch->qp = NULL;
643 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 644
509c07bc 645 if (ch->rx_ring) {
4d73f95f 646 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
647 srp_free_iu(target->srp_host, ch->rx_ring[i]);
648 kfree(ch->rx_ring);
649 ch->rx_ring = NULL;
4d73f95f 650 }
509c07bc 651 if (ch->tx_ring) {
4d73f95f 652 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
653 srp_free_iu(target->srp_host, ch->tx_ring[i]);
654 kfree(ch->tx_ring);
655 ch->tx_ring = NULL;
4d73f95f 656 }
aef9ec39
RD
657}
658
659static void srp_path_rec_completion(int status,
660 struct ib_sa_path_rec *pathrec,
509c07bc 661 void *ch_ptr)
aef9ec39 662{
509c07bc
BVA
663 struct srp_rdma_ch *ch = ch_ptr;
664 struct srp_target_port *target = ch->target;
aef9ec39 665
509c07bc 666 ch->status = status;
aef9ec39 667 if (status)
7aa54bd7
DD
668 shost_printk(KERN_ERR, target->scsi_host,
669 PFX "Got failed path rec status %d\n", status);
aef9ec39 670 else
509c07bc
BVA
671 ch->path = *pathrec;
672 complete(&ch->done);
aef9ec39
RD
673}
674
509c07bc 675static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 676{
509c07bc 677 struct srp_target_port *target = ch->target;
a702adce
BVA
678 int ret;
679
509c07bc
BVA
680 ch->path.numb_path = 1;
681
682 init_completion(&ch->done);
683
684 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
685 target->srp_host->srp_dev->dev,
686 target->srp_host->port,
687 &ch->path,
688 IB_SA_PATH_REC_SERVICE_ID |
689 IB_SA_PATH_REC_DGID |
690 IB_SA_PATH_REC_SGID |
691 IB_SA_PATH_REC_NUMB_PATH |
692 IB_SA_PATH_REC_PKEY,
693 SRP_PATH_REC_TIMEOUT_MS,
694 GFP_KERNEL,
695 srp_path_rec_completion,
696 ch, &ch->path_query);
697 if (ch->path_query_id < 0)
698 return ch->path_query_id;
699
700 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
701 if (ret < 0)
702 return ret;
aef9ec39 703
509c07bc 704 if (ch->status < 0)
7aa54bd7
DD
705 shost_printk(KERN_WARNING, target->scsi_host,
706 PFX "Path record query failed\n");
aef9ec39 707
509c07bc 708 return ch->status;
aef9ec39
RD
709}
710
d92c0da7 711static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 712{
509c07bc 713 struct srp_target_port *target = ch->target;
aef9ec39
RD
714 struct {
715 struct ib_cm_req_param param;
716 struct srp_login_req priv;
717 } *req = NULL;
718 int status;
719
720 req = kzalloc(sizeof *req, GFP_KERNEL);
721 if (!req)
722 return -ENOMEM;
723
509c07bc 724 req->param.primary_path = &ch->path;
aef9ec39
RD
725 req->param.alternate_path = NULL;
726 req->param.service_id = target->service_id;
509c07bc
BVA
727 req->param.qp_num = ch->qp->qp_num;
728 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
729 req->param.private_data = &req->priv;
730 req->param.private_data_len = sizeof req->priv;
731 req->param.flow_control = 1;
732
733 get_random_bytes(&req->param.starting_psn, 4);
734 req->param.starting_psn &= 0xffffff;
735
736 /*
737 * Pick some arbitrary defaults here; we could make these
738 * module parameters if anyone cared about setting them.
739 */
740 req->param.responder_resources = 4;
741 req->param.remote_cm_response_timeout = 20;
742 req->param.local_cm_response_timeout = 20;
7bb312e4 743 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
744 req->param.rnr_retry_count = 7;
745 req->param.max_cm_retries = 15;
746
747 req->priv.opcode = SRP_LOGIN_REQ;
748 req->priv.tag = 0;
49248644 749 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
750 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
751 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
752 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
753 SRP_MULTICHAN_SINGLE);
0c0450db 754 /*
3cd96564 755 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
756 * port identifier format is 8 bytes of ID extension followed
757 * by 8 bytes of GUID. Older drafts put the two halves in the
758 * opposite order, so that the GUID comes first.
759 *
760 * Targets conforming to these obsolete drafts can be
761 * recognized by the I/O Class they report.
762 */
763 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
764 memcpy(req->priv.initiator_port_id,
747fe000 765 &target->sgid.global.interface_id, 8);
0c0450db 766 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 767 &target->initiator_ext, 8);
0c0450db
R
768 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
769 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
770 } else {
771 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
772 &target->initiator_ext, 8);
773 memcpy(req->priv.initiator_port_id + 8,
747fe000 774 &target->sgid.global.interface_id, 8);
0c0450db
R
775 memcpy(req->priv.target_port_id, &target->id_ext, 8);
776 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
777 }
778
aef9ec39
RD
779 /*
780 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
781 * zero out the first 8 bytes of our initiator port ID and set
782 * the second 8 bytes to the local node GUID.
aef9ec39 783 */
5d7cbfd6 784 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
785 shost_printk(KERN_DEBUG, target->scsi_host,
786 PFX "Topspin/Cisco initiator port ID workaround "
787 "activated for target GUID %016llx\n",
45c37cad 788 be64_to_cpu(target->ioc_guid));
aef9ec39 789 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 790 memcpy(req->priv.initiator_port_id + 8,
05321937 791 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 792 }
aef9ec39 793
509c07bc 794 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
795
796 kfree(req);
797
798 return status;
799}
800
ef6c49d8
BVA
801static bool srp_queue_remove_work(struct srp_target_port *target)
802{
803 bool changed = false;
804
805 spin_lock_irq(&target->lock);
806 if (target->state != SRP_TARGET_REMOVED) {
807 target->state = SRP_TARGET_REMOVED;
808 changed = true;
809 }
810 spin_unlock_irq(&target->lock);
811
812 if (changed)
bcc05910 813 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
814
815 return changed;
816}
817
aef9ec39
RD
818static void srp_disconnect_target(struct srp_target_port *target)
819{
d92c0da7
BVA
820 struct srp_rdma_ch *ch;
821 int i;
509c07bc 822
c014c8cd 823 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 824
c014c8cd
BVA
825 for (i = 0; i < target->ch_count; i++) {
826 ch = &target->ch[i];
827 ch->connected = false;
828 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
829 shost_printk(KERN_DEBUG, target->scsi_host,
830 PFX "Sending CM DREQ failed\n");
294c875a 831 }
e6581056 832 }
aef9ec39
RD
833}
834
509c07bc
BVA
835static void srp_free_req_data(struct srp_target_port *target,
836 struct srp_rdma_ch *ch)
8f26c9ff 837{
5cfb1782
BVA
838 struct srp_device *dev = target->srp_host->srp_dev;
839 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
840 struct srp_request *req;
841 int i;
842
47513cf4 843 if (!ch->req_ring)
4d73f95f
BVA
844 return;
845
846 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 847 req = &ch->req_ring[i];
5cfb1782
BVA
848 if (dev->use_fast_reg)
849 kfree(req->fr_list);
850 else
851 kfree(req->fmr_list);
8f26c9ff 852 kfree(req->map_page);
c07d424d
DD
853 if (req->indirect_dma_addr) {
854 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
855 target->indirect_size,
856 DMA_TO_DEVICE);
857 }
858 kfree(req->indirect_desc);
8f26c9ff 859 }
4d73f95f 860
509c07bc
BVA
861 kfree(ch->req_ring);
862 ch->req_ring = NULL;
8f26c9ff
DD
863}
864
509c07bc 865static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 866{
509c07bc 867 struct srp_target_port *target = ch->target;
b81d00bd
BVA
868 struct srp_device *srp_dev = target->srp_host->srp_dev;
869 struct ib_device *ibdev = srp_dev->dev;
870 struct srp_request *req;
5cfb1782 871 void *mr_list;
b81d00bd
BVA
872 dma_addr_t dma_addr;
873 int i, ret = -ENOMEM;
874
509c07bc
BVA
875 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
876 GFP_KERNEL);
877 if (!ch->req_ring)
4d73f95f
BVA
878 goto out;
879
880 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 881 req = &ch->req_ring[i];
5cfb1782
BVA
882 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
883 GFP_KERNEL);
884 if (!mr_list)
885 goto out;
886 if (srp_dev->use_fast_reg)
887 req->fr_list = mr_list;
888 else
889 req->fmr_list = mr_list;
52ede08f 890 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
d1b4289e 891 sizeof(void *), GFP_KERNEL);
5cfb1782
BVA
892 if (!req->map_page)
893 goto out;
b81d00bd 894 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 895 if (!req->indirect_desc)
b81d00bd
BVA
896 goto out;
897
898 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
899 target->indirect_size,
900 DMA_TO_DEVICE);
901 if (ib_dma_mapping_error(ibdev, dma_addr))
902 goto out;
903
904 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
905 }
906 ret = 0;
907
908out:
909 return ret;
910}
911
683b159a
BVA
912/**
913 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
914 * @shost: SCSI host whose attributes to remove from sysfs.
915 *
916 * Note: Any attributes defined in the host template and that did not exist
917 * before invocation of this function will be ignored.
918 */
919static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
920{
921 struct device_attribute **attr;
922
923 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
924 device_remove_file(&shost->shost_dev, *attr);
925}
926
ee12d6a8
BVA
927static void srp_remove_target(struct srp_target_port *target)
928{
d92c0da7
BVA
929 struct srp_rdma_ch *ch;
930 int i;
509c07bc 931
ef6c49d8
BVA
932 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
933
ee12d6a8 934 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 935 srp_rport_get(target->rport);
ee12d6a8
BVA
936 srp_remove_host(target->scsi_host);
937 scsi_remove_host(target->scsi_host);
93079162 938 srp_stop_rport_timers(target->rport);
ef6c49d8 939 srp_disconnect_target(target);
d92c0da7
BVA
940 for (i = 0; i < target->ch_count; i++) {
941 ch = &target->ch[i];
942 srp_free_ch_ib(target, ch);
943 }
c1120f89 944 cancel_work_sync(&target->tl_err_work);
9dd69a60 945 srp_rport_put(target->rport);
d92c0da7
BVA
946 for (i = 0; i < target->ch_count; i++) {
947 ch = &target->ch[i];
948 srp_free_req_data(target, ch);
949 }
950 kfree(target->ch);
951 target->ch = NULL;
65d7dd2f
VP
952
953 spin_lock(&target->srp_host->target_lock);
954 list_del(&target->list);
955 spin_unlock(&target->srp_host->target_lock);
956
ee12d6a8
BVA
957 scsi_host_put(target->scsi_host);
958}
959
c4028958 960static void srp_remove_work(struct work_struct *work)
aef9ec39 961{
c4028958 962 struct srp_target_port *target =
ef6c49d8 963 container_of(work, struct srp_target_port, remove_work);
aef9ec39 964
ef6c49d8 965 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 966
96fc248a 967 srp_remove_target(target);
aef9ec39
RD
968}
969
dc1bdbd9
BVA
970static void srp_rport_delete(struct srp_rport *rport)
971{
972 struct srp_target_port *target = rport->lld_data;
973
974 srp_queue_remove_work(target);
975}
976
c014c8cd
BVA
977/**
978 * srp_connected_ch() - number of connected channels
979 * @target: SRP target port.
980 */
981static int srp_connected_ch(struct srp_target_port *target)
982{
983 int i, c = 0;
984
985 for (i = 0; i < target->ch_count; i++)
986 c += target->ch[i].connected;
987
988 return c;
989}
990
d92c0da7 991static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 992{
509c07bc 993 struct srp_target_port *target = ch->target;
aef9ec39
RD
994 int ret;
995
c014c8cd 996 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 997
509c07bc 998 ret = srp_lookup_path(ch);
aef9ec39
RD
999 if (ret)
1000 return ret;
1001
1002 while (1) {
509c07bc 1003 init_completion(&ch->done);
d92c0da7 1004 ret = srp_send_req(ch, multich);
aef9ec39
RD
1005 if (ret)
1006 return ret;
509c07bc 1007 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
1008 if (ret < 0)
1009 return ret;
aef9ec39
RD
1010
1011 /*
1012 * The CM event handling code will set status to
1013 * SRP_PORT_REDIRECT if we get a port redirect REJ
1014 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1015 * redirect REJ back.
1016 */
509c07bc 1017 switch (ch->status) {
aef9ec39 1018 case 0:
c014c8cd 1019 ch->connected = true;
aef9ec39
RD
1020 return 0;
1021
1022 case SRP_PORT_REDIRECT:
509c07bc 1023 ret = srp_lookup_path(ch);
aef9ec39
RD
1024 if (ret)
1025 return ret;
1026 break;
1027
1028 case SRP_DLID_REDIRECT:
1029 break;
1030
9fe4bcf4 1031 case SRP_STALE_CONN:
9fe4bcf4 1032 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1033 "giving up on stale connection\n");
509c07bc
BVA
1034 ch->status = -ECONNRESET;
1035 return ch->status;
9fe4bcf4 1036
aef9ec39 1037 default:
509c07bc 1038 return ch->status;
aef9ec39
RD
1039 }
1040 }
1041}
1042
509c07bc 1043static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
1044{
1045 struct ib_send_wr *bad_wr;
1046 struct ib_send_wr wr = {
1047 .opcode = IB_WR_LOCAL_INV,
1048 .wr_id = LOCAL_INV_WR_ID_MASK,
1049 .next = NULL,
1050 .num_sge = 0,
1051 .send_flags = 0,
1052 .ex.invalidate_rkey = rkey,
1053 };
1054
509c07bc 1055 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1056}
1057
d945e1df 1058static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1059 struct srp_rdma_ch *ch,
d945e1df
RD
1060 struct srp_request *req)
1061{
509c07bc 1062 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1063 struct srp_device *dev = target->srp_host->srp_dev;
1064 struct ib_device *ibdev = dev->dev;
1065 int i, res;
8f26c9ff 1066
bb350d1d 1067 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1068 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1069 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1070 return;
1071
5cfb1782
BVA
1072 if (dev->use_fast_reg) {
1073 struct srp_fr_desc **pfr;
1074
1075 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1076 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1077 if (res < 0) {
1078 shost_printk(KERN_ERR, target->scsi_host, PFX
1079 "Queueing INV WR for rkey %#x failed (%d)\n",
1080 (*pfr)->mr->rkey, res);
1081 queue_work(system_long_wq,
1082 &target->tl_err_work);
1083 }
1084 }
1085 if (req->nmdesc)
509c07bc 1086 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782 1087 req->nmdesc);
002f1567 1088 } else if (dev->use_fmr) {
5cfb1782
BVA
1089 struct ib_pool_fmr **pfmr;
1090
1091 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1092 ib_fmr_pool_unmap(*pfmr);
1093 }
f5358a17 1094
8f26c9ff
DD
1095 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1096 scmnd->sc_data_direction);
d945e1df
RD
1097}
1098
22032991
BVA
1099/**
1100 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1101 * @ch: SRP RDMA channel.
22032991 1102 * @req: SRP request.
b3fe628d 1103 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1104 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1105 * ownership of @req->scmnd if it equals @scmnd.
1106 *
1107 * Return value:
1108 * Either NULL or a pointer to the SCSI command the caller became owner of.
1109 */
509c07bc 1110static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1111 struct srp_request *req,
b3fe628d 1112 struct scsi_device *sdev,
22032991
BVA
1113 struct scsi_cmnd *scmnd)
1114{
1115 unsigned long flags;
1116
509c07bc 1117 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1118 if (req->scmnd &&
1119 (!sdev || req->scmnd->device == sdev) &&
1120 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1121 scmnd = req->scmnd;
1122 req->scmnd = NULL;
22032991
BVA
1123 } else {
1124 scmnd = NULL;
1125 }
509c07bc 1126 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1127
1128 return scmnd;
1129}
1130
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
509c07bc
BVA
1138static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1139 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1140{
94a9174c
BVA
1141 unsigned long flags;
1142
509c07bc 1143 srp_unmap_data(scmnd, ch, req);
22032991 1144
509c07bc
BVA
1145 spin_lock_irqsave(&ch->lock, flags);
1146 ch->req_lim += req_lim_delta;
509c07bc 1147 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1148}
1149
509c07bc
BVA
1150static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1151 struct scsi_device *sdev, int result)
526b4caa 1152{
509c07bc 1153 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1154
1155 if (scmnd) {
509c07bc 1156 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1157 scmnd->result = result;
22032991 1158 scmnd->scsi_done(scmnd);
22032991 1159 }
526b4caa
IR
1160}
1161
ed9b2264 1162static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1163{
ed9b2264 1164 struct srp_target_port *target = rport->lld_data;
d92c0da7 1165 struct srp_rdma_ch *ch;
b3fe628d
BVA
1166 struct Scsi_Host *shost = target->scsi_host;
1167 struct scsi_device *sdev;
d92c0da7 1168 int i, j;
ed9b2264 1169
b3fe628d
BVA
1170 /*
1171 * Invoking srp_terminate_io() while srp_queuecommand() is running
1172 * is not safe. Hence the warning statement below.
1173 */
1174 shost_for_each_device(sdev, shost)
1175 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1176
d92c0da7
BVA
1177 for (i = 0; i < target->ch_count; i++) {
1178 ch = &target->ch[i];
509c07bc 1179
d92c0da7
BVA
1180 for (j = 0; j < target->req_ring_size; ++j) {
1181 struct srp_request *req = &ch->req_ring[j];
1182
1183 srp_finish_req(ch, req, NULL,
1184 DID_TRANSPORT_FAILFAST << 16);
1185 }
ed9b2264
BVA
1186 }
1187}
aef9ec39 1188
ed9b2264
BVA
1189/*
1190 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1191 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1192 * srp_reset_device() or srp_reset_host() calls will occur while this function
1193 * is in progress. One way to realize that is not to call this function
1194 * directly but to call srp_reconnect_rport() instead since that last function
1195 * serializes calls of this function via rport->mutex and also blocks
1196 * srp_queuecommand() calls before invoking this function.
1197 */
1198static int srp_rport_reconnect(struct srp_rport *rport)
1199{
1200 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1201 struct srp_rdma_ch *ch;
1202 int i, j, ret = 0;
1203 bool multich = false;
09be70a2 1204
aef9ec39 1205 srp_disconnect_target(target);
34aa654e
BVA
1206
1207 if (target->state == SRP_TARGET_SCANNING)
1208 return -ENODEV;
1209
aef9ec39 1210 /*
c7c4e7ff
BVA
1211 * Now get a new local CM ID so that we avoid confusing the target in
1212 * case things are really fouled up. Doing so also ensures that all CM
1213 * callbacks will have finished before a new QP is allocated.
aef9ec39 1214 */
d92c0da7
BVA
1215 for (i = 0; i < target->ch_count; i++) {
1216 ch = &target->ch[i];
d92c0da7 1217 ret += srp_new_cm_id(ch);
536ae14e 1218 }
d92c0da7
BVA
1219 for (i = 0; i < target->ch_count; i++) {
1220 ch = &target->ch[i];
d92c0da7
BVA
1221 for (j = 0; j < target->req_ring_size; ++j) {
1222 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1223
d92c0da7
BVA
1224 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1225 }
1226 }
1227 for (i = 0; i < target->ch_count; i++) {
1228 ch = &target->ch[i];
d92c0da7
BVA
1229 /*
1230 * Whether or not creating a new CM ID succeeded, create a new
1231 * QP. This guarantees that all completion callback function
1232 * invocations have finished before request resetting starts.
1233 */
1234 ret += srp_create_ch_ib(ch);
aef9ec39 1235
d92c0da7
BVA
1236 INIT_LIST_HEAD(&ch->free_tx);
1237 for (j = 0; j < target->queue_size; ++j)
1238 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1239 }
8de9fe3a
BVA
1240
1241 target->qp_in_error = false;
1242
d92c0da7
BVA
1243 for (i = 0; i < target->ch_count; i++) {
1244 ch = &target->ch[i];
bbac5ccf 1245 if (ret)
d92c0da7 1246 break;
d92c0da7
BVA
1247 ret = srp_connect_ch(ch, multich);
1248 multich = true;
1249 }
09be70a2 1250
ed9b2264
BVA
1251 if (ret == 0)
1252 shost_printk(KERN_INFO, target->scsi_host,
1253 PFX "reconnect succeeded\n");
aef9ec39
RD
1254
1255 return ret;
1256}
1257
8f26c9ff
DD
1258static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1259 unsigned int dma_len, u32 rkey)
f5358a17 1260{
8f26c9ff 1261 struct srp_direct_buf *desc = state->desc;
f5358a17 1262
3ae95da8
BVA
1263 WARN_ON_ONCE(!dma_len);
1264
8f26c9ff
DD
1265 desc->va = cpu_to_be64(dma_addr);
1266 desc->key = cpu_to_be32(rkey);
1267 desc->len = cpu_to_be32(dma_len);
f5358a17 1268
8f26c9ff
DD
1269 state->total_len += dma_len;
1270 state->desc++;
1271 state->ndesc++;
1272}
559ce8f1 1273
8f26c9ff 1274static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1275 struct srp_rdma_ch *ch)
8f26c9ff 1276{
186fbc66
BVA
1277 struct srp_target_port *target = ch->target;
1278 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1279 struct ib_pool_fmr *fmr;
1280 u64 io_addr = 0;
85507bcc 1281
f731ed62
BVA
1282 if (state->fmr.next >= state->fmr.end)
1283 return -ENOMEM;
1284
509c07bc 1285 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1286 state->npages, io_addr);
1287 if (IS_ERR(fmr))
1288 return PTR_ERR(fmr);
f5358a17 1289
f731ed62 1290 *state->fmr.next++ = fmr;
52ede08f 1291 state->nmdesc++;
f5358a17 1292
186fbc66
BVA
1293 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1294 state->dma_len, fmr->fmr->rkey);
539dde6f 1295
8f26c9ff
DD
1296 return 0;
1297}
1298
5cfb1782 1299static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1300 struct srp_rdma_ch *ch)
5cfb1782 1301{
509c07bc 1302 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1303 struct srp_device *dev = target->srp_host->srp_dev;
1304 struct ib_send_wr *bad_wr;
1305 struct ib_send_wr wr;
1306 struct srp_fr_desc *desc;
1307 u32 rkey;
1308
f731ed62
BVA
1309 if (state->fr.next >= state->fr.end)
1310 return -ENOMEM;
1311
509c07bc 1312 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1313 if (!desc)
1314 return -ENOMEM;
1315
1316 rkey = ib_inc_rkey(desc->mr->rkey);
1317 ib_update_fast_reg_key(desc->mr, rkey);
1318
1319 memcpy(desc->frpl->page_list, state->pages,
1320 sizeof(state->pages[0]) * state->npages);
1321
1322 memset(&wr, 0, sizeof(wr));
1323 wr.opcode = IB_WR_FAST_REG_MR;
1324 wr.wr_id = FAST_REG_WR_ID_MASK;
1325 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1326 wr.wr.fast_reg.page_list = desc->frpl;
1327 wr.wr.fast_reg.page_list_len = state->npages;
1328 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1329 wr.wr.fast_reg.length = state->dma_len;
1330 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1331 IB_ACCESS_REMOTE_READ |
1332 IB_ACCESS_REMOTE_WRITE);
1333 wr.wr.fast_reg.rkey = desc->mr->lkey;
1334
f731ed62 1335 *state->fr.next++ = desc;
5cfb1782
BVA
1336 state->nmdesc++;
1337
1338 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1339 desc->mr->rkey);
1340
509c07bc 1341 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1342}
1343
539dde6f 1344static int srp_finish_mapping(struct srp_map_state *state,
509c07bc 1345 struct srp_rdma_ch *ch)
539dde6f 1346{
509c07bc 1347 struct srp_target_port *target = ch->target;
002f1567 1348 struct srp_device *dev = target->srp_host->srp_dev;
539dde6f
BVA
1349 int ret = 0;
1350
002f1567
BVA
1351 WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
1352
539dde6f
BVA
1353 if (state->npages == 0)
1354 return 0;
1355
b1b8854d 1356 if (state->npages == 1 && !register_always)
52ede08f 1357 srp_map_desc(state, state->base_dma_addr, state->dma_len,
539dde6f
BVA
1358 target->rkey);
1359 else
002f1567 1360 ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
509c07bc 1361 srp_map_finish_fmr(state, ch);
539dde6f
BVA
1362
1363 if (ret == 0) {
1364 state->npages = 0;
52ede08f 1365 state->dma_len = 0;
539dde6f
BVA
1366 }
1367
1368 return ret;
1369}
1370
8f26c9ff 1371static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1372 struct srp_rdma_ch *ch,
3ae95da8 1373 struct scatterlist *sg, int sg_index)
8f26c9ff 1374{
509c07bc 1375 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1376 struct srp_device *dev = target->srp_host->srp_dev;
1377 struct ib_device *ibdev = dev->dev;
1378 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1379 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3ae95da8 1380 unsigned int len = 0;
8f26c9ff
DD
1381 int ret;
1382
3ae95da8 1383 WARN_ON_ONCE(!dma_len);
f5358a17 1384
8f26c9ff 1385 while (dma_len) {
5cfb1782
BVA
1386 unsigned offset = dma_addr & ~dev->mr_page_mask;
1387 if (state->npages == dev->max_pages_per_mr || offset != 0) {
509c07bc 1388 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1389 if (ret)
1390 return ret;
8f26c9ff
DD
1391 }
1392
5cfb1782 1393 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1394
8f26c9ff
DD
1395 if (!state->npages)
1396 state->base_dma_addr = dma_addr;
5cfb1782 1397 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1398 state->dma_len += len;
8f26c9ff
DD
1399 dma_addr += len;
1400 dma_len -= len;
1401 }
1402
5cfb1782
BVA
1403 /*
1404 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff
DD
1405 * close it out and start a new one -- we can only merge at page
	 * boundaries.
1407 */
1408 ret = 0;
0e0d3a48 1409 if (len != dev->mr_page_size)
509c07bc 1410 ret = srp_finish_mapping(state, ch);
f5358a17
RD
1411 return ret;
1412}
1413
509c07bc
BVA
1414static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1415 struct srp_request *req, struct scatterlist *scat,
1416 int count)
76bc1e1d 1417{
509c07bc 1418 struct srp_target_port *target = ch->target;
76bc1e1d 1419 struct srp_device *dev = target->srp_host->srp_dev;
76bc1e1d 1420 struct scatterlist *sg;
0e0d3a48 1421 int i, ret;
76bc1e1d
BVA
1422
1423 state->desc = req->indirect_desc;
1424 state->pages = req->map_page;
5cfb1782 1425 if (dev->use_fast_reg) {
f731ed62
BVA
1426 state->fr.next = req->fr_list;
1427 state->fr.end = req->fr_list + target->cmd_sg_cnt;
002f1567 1428 } else if (dev->use_fmr) {
f731ed62
BVA
1429 state->fmr.next = req->fmr_list;
1430 state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
5cfb1782 1431 }
76bc1e1d 1432
002f1567 1433 if (dev->use_fast_reg || dev->use_fmr) {
3ae95da8
BVA
1434 for_each_sg(scat, sg, count, i) {
1435 ret = srp_map_sg_entry(state, ch, sg, i);
1436 if (ret)
1437 goto out;
1438 }
0e0d3a48
BVA
1439 ret = srp_finish_mapping(state, ch);
1440 if (ret)
1441 goto out;
3ae95da8
BVA
1442 } else {
1443 for_each_sg(scat, sg, count, i) {
1444 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1445 ib_sg_dma_len(dev->dev, sg), target->rkey);
1446 }
0e0d3a48 1447 }
76bc1e1d 1448
52ede08f 1449 req->nmdesc = state->nmdesc;
0e0d3a48 1450 ret = 0;
5cfb1782 1451
0e0d3a48
BVA
1452out:
1453 return ret;
76bc1e1d
BVA
1454}
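
/*
 * Summary of the mapping strategies handled above: with fast registration or
 * FMR, runs of contiguous pages are collapsed into memory descriptors via
 * srp_finish_mapping(); without memory registration, every scatterlist entry
 * is emitted as its own srp_direct_buf addressed through target->rkey.
 */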
1455
330179f2
BVA
1456/*
1457 * Register the indirect data buffer descriptor with the HCA.
1458 *
1459 * Note: since the indirect data buffer descriptor has been allocated with
1460 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1461 * memory buffer.
1462 */
1463static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1464 void **next_mr, void **end_mr, u32 idb_len,
1465 __be32 *idb_rkey)
1466{
1467 struct srp_target_port *target = ch->target;
1468 struct srp_device *dev = target->srp_host->srp_dev;
1469 struct srp_map_state state;
1470 struct srp_direct_buf idb_desc;
1471 u64 idb_pages[1];
1472 int ret;
1473
1474 memset(&state, 0, sizeof(state));
1475 memset(&idb_desc, 0, sizeof(idb_desc));
1476 state.gen.next = next_mr;
1477 state.gen.end = end_mr;
1478 state.desc = &idb_desc;
1479 state.pages = idb_pages;
1480 state.pages[0] = (req->indirect_dma_addr &
1481 dev->mr_page_mask);
1482 state.npages = 1;
1483 state.base_dma_addr = req->indirect_dma_addr;
1484 state.dma_len = idb_len;
1485 ret = srp_finish_mapping(&state, ch);
1486 if (ret < 0)
1487 goto out;
1488
1489 *idb_rkey = idb_desc.key;
1490
1491out:
1492 return ret;
1493}
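
/*
 * Usage sketch (see srp_map_data() below): when register_always is set and
 * fast registration or FMR is available, the indirect descriptor table that
 * has just been built is itself registered via srp_map_idb() and the
 * resulting rkey is stored in indirect_hdr->table_desc.key so that the
 * target can RDMA-read the table.
 */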
1494
509c07bc 1495static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1496 struct srp_request *req)
1497{
509c07bc 1498 struct srp_target_port *target = ch->target;
76bc1e1d 1499 struct scatterlist *scat;
aef9ec39 1500 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1501 int len, nents, count, ret;
85507bcc
RC
1502 struct srp_device *dev;
1503 struct ib_device *ibdev;
8f26c9ff
DD
1504 struct srp_map_state state;
1505 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1506 u32 idb_len, table_len;
1507 __be32 idb_rkey;
8f26c9ff 1508 u8 fmt;
aef9ec39 1509
bb350d1d 1510 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1511 return sizeof (struct srp_cmd);
1512
1513 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1514 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1515 shost_printk(KERN_WARNING, target->scsi_host,
1516 PFX "Unhandled data direction %d\n",
1517 scmnd->sc_data_direction);
aef9ec39
RD
1518 return -EINVAL;
1519 }
1520
bb350d1d
FT
1521 nents = scsi_sg_count(scmnd);
1522 scat = scsi_sglist(scmnd);
aef9ec39 1523
05321937 1524 dev = target->srp_host->srp_dev;
85507bcc
RC
1525 ibdev = dev->dev;
1526
1527 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1528 if (unlikely(count == 0))
1529 return -EIO;
f5358a17
RD
1530
1531 fmt = SRP_DATA_DESC_DIRECT;
1532 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1533
b1b8854d 1534 if (count == 1 && !register_always) {
f5358a17
RD
1535 /*
1536 * The midlayer only generated a single gather/scatter
1537 * entry, or DMA mapping coalesced everything to a
1538 * single entry. So a direct descriptor along with
1539 * the DMA MR suffices.
1540 */
cf368713 1541 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1542
85507bcc 1543 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
9af76271 1544 buf->key = cpu_to_be32(target->rkey);
85507bcc 1545 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1546
52ede08f 1547 req->nmdesc = 0;
8f26c9ff
DD
1548 goto map_complete;
1549 }
1550
5cfb1782
BVA
1551 /*
1552 * We have more than one scatter/gather entry, so build our indirect
1553 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1554 */
1555 indirect_hdr = (void *) cmd->add_data;
1556
c07d424d
DD
1557 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1558 target->indirect_size, DMA_TO_DEVICE);
1559
8f26c9ff 1560 memset(&state, 0, sizeof(state));
509c07bc 1561 srp_map_sg(&state, ch, req, scat, count);
cf368713 1562
c07d424d
DD
1563 /* We've mapped the request, now pull as much of the indirect
1564 * descriptor table as we can into the command buffer. If this
1565 * target is not using an external indirect table, we are
1566 * guaranteed to fit into the command, as the SCSI layer won't
1567 * give us more S/G entries than we allow.
8f26c9ff 1568 */
8f26c9ff 1569 if (state.ndesc == 1) {
5cfb1782
BVA
1570 /*
1571 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1572 * so use a direct descriptor.
1573 */
1574 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1575
c07d424d 1576 *buf = req->indirect_desc[0];
8f26c9ff 1577 goto map_complete;
aef9ec39
RD
1578 }
1579
c07d424d
DD
1580 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1581 !target->allow_ext_sg)) {
1582 shost_printk(KERN_ERR, target->scsi_host,
1583 "Could not fit S/G list into SRP_CMD\n");
1584 return -EIO;
1585 }
1586
1587 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1588 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1589 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1590
1591 fmt = SRP_DATA_DESC_INDIRECT;
1592 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1593 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1594
c07d424d
DD
1595 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1596 count * sizeof (struct srp_direct_buf));
8f26c9ff 1597
330179f2
BVA
1598 if (register_always && (dev->use_fast_reg || dev->use_fmr)) {
1599 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1600 idb_len, &idb_rkey);
1601 if (ret < 0)
1602 return ret;
1603 req->nmdesc++;
1604 } else {
1605 idb_rkey = target->rkey;
1606 }
1607
c07d424d 1608 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1609 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1610 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1611 indirect_hdr->len = cpu_to_be32(state.total_len);
1612
1613 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1614 cmd->data_out_desc_cnt = count;
8f26c9ff 1615 else
c07d424d
DD
1616 cmd->data_in_desc_cnt = count;
1617
1618 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1619 DMA_TO_DEVICE);
8f26c9ff
DD
1620
1621map_complete:
aef9ec39
RD
1622 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1623 cmd->buf_fmt = fmt << 4;
1624 else
1625 cmd->buf_fmt = fmt;
1626
aef9ec39
RD
1627 return len;
1628}
1629
76c75b25
BVA
1630/*
1631 * Return an IU and possible credit to the free pool
1632 */
509c07bc 1633static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1634 enum srp_iu_type iu_type)
1635{
1636 unsigned long flags;
1637
509c07bc
BVA
1638 spin_lock_irqsave(&ch->lock, flags);
1639 list_add(&iu->list, &ch->free_tx);
76c75b25 1640 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1641 ++ch->req_lim;
1642 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1643}
1644
05a1d750 1645/*
509c07bc 1646 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1647 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1648 *
1649 * Note:
1650 * An upper limit for the number of allocated information units for each
1651 * request type is:
1652 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1653 * more than Scsi_Host.can_queue requests.
1654 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1655 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1656 * one unanswered SRP request to an initiator.
1657 */
509c07bc 1658static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1659 enum srp_iu_type iu_type)
1660{
509c07bc 1661 struct srp_target_port *target = ch->target;
05a1d750
DD
1662 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1663 struct srp_iu *iu;
1664
509c07bc 1665 srp_send_completion(ch->send_cq, ch);
05a1d750 1666
509c07bc 1667 if (list_empty(&ch->free_tx))
05a1d750
DD
1668 return NULL;
1669
1670 /* Initiator responses to target requests do not consume credits */
76c75b25 1671 if (iu_type != SRP_IU_RSP) {
509c07bc 1672 if (ch->req_lim <= rsv) {
76c75b25
BVA
1673 ++target->zero_req_lim;
1674 return NULL;
1675 }
1676
509c07bc 1677 --ch->req_lim;
05a1d750
DD
1678 }
1679
509c07bc 1680 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1681 list_del(&iu->list);
05a1d750
DD
1682 return iu;
1683}
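
/*
 * Typical caller pattern (sketch, cf. srp_response_common() below): take
 * ch->lock, obtain an IU with __srp_get_tx_iu(), drop the lock, fill in the
 * payload and post it with srp_post_send(); if sending fails, hand the IU
 * and its credit back via srp_put_tx_iu().
 */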
1684
509c07bc 1685static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1686{
509c07bc 1687 struct srp_target_port *target = ch->target;
05a1d750
DD
1688 struct ib_sge list;
1689 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1690
1691 list.addr = iu->dma;
1692 list.length = len;
9af76271 1693 list.lkey = target->lkey;
05a1d750
DD
1694
1695 wr.next = NULL;
dcb4cb85 1696 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1697 wr.sg_list = &list;
1698 wr.num_sge = 1;
1699 wr.opcode = IB_WR_SEND;
1700 wr.send_flags = IB_SEND_SIGNALED;
1701
509c07bc 1702 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1703}
1704
509c07bc 1705static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1706{
509c07bc 1707 struct srp_target_port *target = ch->target;
c996bb47 1708 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1709 struct ib_sge list;
c996bb47
BVA
1710
1711 list.addr = iu->dma;
1712 list.length = iu->size;
9af76271 1713 list.lkey = target->lkey;
c996bb47
BVA
1714
1715 wr.next = NULL;
dcb4cb85 1716 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1717 wr.sg_list = &list;
1718 wr.num_sge = 1;
1719
509c07bc 1720 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1721}
1722
509c07bc 1723static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1724{
509c07bc 1725 struct srp_target_port *target = ch->target;
aef9ec39
RD
1726 struct srp_request *req;
1727 struct scsi_cmnd *scmnd;
1728 unsigned long flags;
aef9ec39 1729
aef9ec39 1730 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1731 spin_lock_irqsave(&ch->lock, flags);
1732 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1733 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1734
509c07bc 1735 ch->tsk_mgmt_status = -1;
f8b6e31e 1736 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1737 ch->tsk_mgmt_status = rsp->data[3];
1738 complete(&ch->tsk_mgmt_done);
aef9ec39 1739 } else {
77f2c1a4
BVA
1740 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1741 if (scmnd) {
1742 req = (void *)scmnd->host_scribble;
1743 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1744 }
22032991 1745 if (!scmnd) {
7aa54bd7 1746 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1747 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1748 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1749
509c07bc
BVA
1750 spin_lock_irqsave(&ch->lock, flags);
1751 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1752 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1753
1754 return;
1755 }
aef9ec39
RD
1756 scmnd->result = rsp->status;
1757
1758 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1759 memcpy(scmnd->sense_buffer, rsp->data +
1760 be32_to_cpu(rsp->resp_data_len),
1761 min_t(int, be32_to_cpu(rsp->sense_data_len),
1762 SCSI_SENSE_BUFFERSIZE));
1763 }
1764
e714531a 1765 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1766 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1767 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1768 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1769 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1770 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1772 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1773
509c07bc 1774 srp_free_req(ch, req, scmnd,
22032991
BVA
1775 be32_to_cpu(rsp->req_lim_delta));
1776
f8b6e31e
DD
1777 scmnd->host_scribble = NULL;
1778 scmnd->scsi_done(scmnd);
aef9ec39 1779 }
aef9ec39
RD
1780}
1781
509c07bc 1782static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1783 void *rsp, int len)
1784{
509c07bc 1785 struct srp_target_port *target = ch->target;
76c75b25 1786 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1787 unsigned long flags;
1788 struct srp_iu *iu;
76c75b25 1789 int err;
bb12588a 1790
509c07bc
BVA
1791 spin_lock_irqsave(&ch->lock, flags);
1792 ch->req_lim += req_delta;
1793 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1794 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1795
bb12588a
DD
1796 if (!iu) {
1797 shost_printk(KERN_ERR, target->scsi_host, PFX
1798 "no IU available to send response\n");
76c75b25 1799 return 1;
bb12588a
DD
1800 }
1801
1802 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1803 memcpy(iu->buf, rsp, len);
1804 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1805
509c07bc 1806 err = srp_post_send(ch, iu, len);
76c75b25 1807 if (err) {
bb12588a
DD
1808 shost_printk(KERN_ERR, target->scsi_host, PFX
1809 "unable to post response: %d\n", err);
509c07bc 1810 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1811 }
bb12588a 1812
bb12588a
DD
1813 return err;
1814}
1815
509c07bc 1816static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1817 struct srp_cred_req *req)
1818{
1819 struct srp_cred_rsp rsp = {
1820 .opcode = SRP_CRED_RSP,
1821 .tag = req->tag,
1822 };
1823 s32 delta = be32_to_cpu(req->req_lim_delta);
1824
509c07bc
BVA
1825 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1826 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1827 "problems processing SRP_CRED_REQ\n");
1828}
1829
509c07bc 1830static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1831 struct srp_aer_req *req)
1832{
509c07bc 1833 struct srp_target_port *target = ch->target;
bb12588a
DD
1834 struct srp_aer_rsp rsp = {
1835 .opcode = SRP_AER_RSP,
1836 .tag = req->tag,
1837 };
1838 s32 delta = be32_to_cpu(req->req_lim_delta);
1839
1840 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1841 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1842
509c07bc 1843 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1844 shost_printk(KERN_ERR, target->scsi_host, PFX
1845 "problems processing SRP_AER_REQ\n");
1846}
1847
509c07bc 1848static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1849{
509c07bc 1850 struct srp_target_port *target = ch->target;
dcb4cb85 1851 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1852 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1853 int res;
aef9ec39
RD
1854 u8 opcode;
1855
509c07bc 1856 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1857 DMA_FROM_DEVICE);
aef9ec39
RD
1858
1859 opcode = *(u8 *) iu->buf;
1860
1861 if (0) {
7aa54bd7
DD
1862 shost_printk(KERN_ERR, target->scsi_host,
1863 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1864 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1865 iu->buf, wc->byte_len, true);
aef9ec39
RD
1866 }
1867
1868 switch (opcode) {
1869 case SRP_RSP:
509c07bc 1870 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1871 break;
1872
bb12588a 1873 case SRP_CRED_REQ:
509c07bc 1874 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1875 break;
1876
1877 case SRP_AER_REQ:
509c07bc 1878 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1879 break;
1880
aef9ec39
RD
1881 case SRP_T_LOGOUT:
1882 /* XXX Handle target logout */
7aa54bd7
DD
1883 shost_printk(KERN_WARNING, target->scsi_host,
1884 PFX "Got target logout request\n");
aef9ec39
RD
1885 break;
1886
1887 default:
7aa54bd7
DD
1888 shost_printk(KERN_WARNING, target->scsi_host,
1889 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1890 break;
1891 }
1892
509c07bc 1893 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1894 DMA_FROM_DEVICE);
c996bb47 1895
509c07bc 1896 res = srp_post_recv(ch, iu);
c996bb47
BVA
1897 if (res != 0)
1898 shost_printk(KERN_ERR, target->scsi_host,
1899 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1900}
1901
c1120f89
BVA
1902/**
1903 * srp_tl_err_work() - handle a transport layer error
af24663b 1904 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1905 *
1906 * Note: This function may get invoked before the rport has been created,
1907 * hence the target->rport test.
1908 */
1909static void srp_tl_err_work(struct work_struct *work)
1910{
1911 struct srp_target_port *target;
1912
1913 target = container_of(work, struct srp_target_port, tl_err_work);
1914 if (target->rport)
1915 srp_start_tl_fail_timers(target->rport);
1916}
1917
5cfb1782 1918static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
7dad6b2e 1919 bool send_err, struct srp_rdma_ch *ch)
948d1e88 1920{
7dad6b2e
BVA
1921 struct srp_target_port *target = ch->target;
1922
1923 if (wr_id == SRP_LAST_WR_ID) {
1924 complete(&ch->done);
1925 return;
1926 }
1927
c014c8cd 1928 if (ch->connected && !target->qp_in_error) {
5cfb1782
BVA
1929 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1930 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1931 "LOCAL_INV failed with status %s (%d)\n",
1932 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1933 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1934 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1935 "FAST_REG_MR failed status %s (%d)\n",
1936 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1937 } else {
1938 shost_printk(KERN_ERR, target->scsi_host,
57363d98 1939 PFX "failed %s status %s (%d) for iu %p\n",
5cfb1782 1940 send_err ? "send" : "receive",
57363d98
SG
1941 ib_wc_status_msg(wc_status), wc_status,
1942 (void *)(uintptr_t)wr_id);
5cfb1782 1943 }
c1120f89 1944 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 1945 }
948d1e88
BVA
1946 target->qp_in_error = true;
1947}
1948
509c07bc 1949static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 1950{
509c07bc 1951 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 1952 struct ib_wc wc;
aef9ec39
RD
1953
1954 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1955 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 1956 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 1957 srp_handle_recv(ch, &wc);
948d1e88 1958 } else {
7dad6b2e 1959 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
aef9ec39 1960 }
9c03dc9f
BVA
1961 }
1962}
1963
509c07bc 1964static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 1965{
509c07bc 1966 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 1967 struct ib_wc wc;
dcb4cb85 1968 struct srp_iu *iu;
9c03dc9f
BVA
1969
1970 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1971 if (likely(wc.status == IB_WC_SUCCESS)) {
1972 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 1973 list_add(&iu->list, &ch->free_tx);
948d1e88 1974 } else {
7dad6b2e 1975 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
9c03dc9f 1976 }
aef9ec39
RD
1977 }
1978}
1979
76c75b25 1980static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 1981{
76c75b25 1982 struct srp_target_port *target = host_to_target(shost);
a95cadb9 1983 struct srp_rport *rport = target->rport;
509c07bc 1984 struct srp_rdma_ch *ch;
aef9ec39
RD
1985 struct srp_request *req;
1986 struct srp_iu *iu;
1987 struct srp_cmd *cmd;
85507bcc 1988 struct ib_device *dev;
76c75b25 1989 unsigned long flags;
77f2c1a4
BVA
1990 u32 tag;
1991 u16 idx;
d1b4289e 1992 int len, ret;
a95cadb9
BVA
1993 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1994
1995 /*
1996 * The SCSI EH thread is the only context from which srp_queuecommand()
1997 * can get invoked for blocked devices (SDEV_BLOCK /
1998 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1999 * locking the rport mutex if invoked from inside the SCSI EH.
2000 */
2001 if (in_scsi_eh)
2002 mutex_lock(&rport->mutex);
aef9ec39 2003
d1b4289e
BVA
2004 scmnd->result = srp_chkready(target->rport);
2005 if (unlikely(scmnd->result))
2006 goto err;
2ce19e72 2007
77f2c1a4
BVA
2008 WARN_ON_ONCE(scmnd->request->tag < 0);
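	/*
	 * The block layer's unique tag packs the hardware queue index into
	 * its upper bits and the per-queue tag into the lower 16 bits, so
	 * the single value obtained below selects both the RDMA channel and
	 * the slot in that channel's req_ring.
	 */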
2009 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2010 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2011 idx = blk_mq_unique_tag_to_tag(tag);
2012 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2013 dev_name(&shost->shost_gendev), tag, idx,
2014 target->req_ring_size);
509c07bc
BVA
2015
2016 spin_lock_irqsave(&ch->lock, flags);
2017 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2018 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2019
77f2c1a4
BVA
2020 if (!iu)
2021 goto err;
2022
2023 req = &ch->req_ring[idx];
05321937 2024 dev = target->srp_host->srp_dev->dev;
49248644 2025 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2026 DMA_TO_DEVICE);
aef9ec39 2027
f8b6e31e 2028 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2029
2030 cmd = iu->buf;
2031 memset(cmd, 0, sizeof *cmd);
2032
2033 cmd->opcode = SRP_CMD;
985aa495 2034 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2035 cmd->tag = tag;
aef9ec39
RD
2036 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2037
aef9ec39
RD
2038 req->scmnd = scmnd;
2039 req->cmd = iu;
aef9ec39 2040
509c07bc 2041 len = srp_map_data(scmnd, ch, req);
aef9ec39 2042 if (len < 0) {
7aa54bd7 2043 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2044 PFX "Failed to map data (%d)\n", len);
2045 /*
2046 * If we ran out of memory descriptors (-ENOMEM) because an
2047 * application is queuing many requests with more than
52ede08f 2048 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2049 * to reduce queue depth temporarily.
2050 */
2051 scmnd->result = len == -ENOMEM ?
2052 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2053 goto err_iu;
aef9ec39
RD
2054 }
2055
49248644 2056 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2057 DMA_TO_DEVICE);
aef9ec39 2058
509c07bc 2059 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2060 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2061 goto err_unmap;
2062 }
2063
d1b4289e
BVA
2064 ret = 0;
2065
a95cadb9
BVA
2066unlock_rport:
2067 if (in_scsi_eh)
2068 mutex_unlock(&rport->mutex);
2069
d1b4289e 2070 return ret;
aef9ec39
RD
2071
2072err_unmap:
509c07bc 2073 srp_unmap_data(scmnd, ch, req);
aef9ec39 2074
76c75b25 2075err_iu:
509c07bc 2076 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2077
024ca901
BVA
2078 /*
 2079 * Ensure that the loops that iterate over the request ring do not
2080 * encounter a dangling SCSI command pointer.
2081 */
2082 req->scmnd = NULL;
2083
d1b4289e
BVA
2084err:
2085 if (scmnd->result) {
2086 scmnd->scsi_done(scmnd);
2087 ret = 0;
2088 } else {
2089 ret = SCSI_MLQUEUE_HOST_BUSY;
2090 }
a95cadb9 2091
d1b4289e 2092 goto unlock_rport;
aef9ec39
RD
2093}
2094
4d73f95f
BVA
2095/*
2096 * Note: the resources allocated in this function are freed in
509c07bc 2097 * srp_free_ch_ib().
4d73f95f 2098 */
509c07bc 2099static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2100{
509c07bc 2101 struct srp_target_port *target = ch->target;
aef9ec39
RD
2102 int i;
2103
509c07bc
BVA
2104 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2105 GFP_KERNEL);
2106 if (!ch->rx_ring)
4d73f95f 2107 goto err_no_ring;
509c07bc
BVA
2108 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2109 GFP_KERNEL);
2110 if (!ch->tx_ring)
4d73f95f
BVA
2111 goto err_no_ring;
2112
2113 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2114 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2115 ch->max_ti_iu_len,
2116 GFP_KERNEL, DMA_FROM_DEVICE);
2117 if (!ch->rx_ring[i])
aef9ec39
RD
2118 goto err;
2119 }
2120
4d73f95f 2121 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2122 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2123 target->max_iu_len,
2124 GFP_KERNEL, DMA_TO_DEVICE);
2125 if (!ch->tx_ring[i])
aef9ec39 2126 goto err;
dcb4cb85 2127
509c07bc 2128 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2129 }
2130
2131 return 0;
2132
2133err:
4d73f95f 2134 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2135 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2136 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2137 }
2138
4d73f95f
BVA
2139
2140err_no_ring:
509c07bc
BVA
2141 kfree(ch->tx_ring);
2142 ch->tx_ring = NULL;
2143 kfree(ch->rx_ring);
2144 ch->rx_ring = NULL;
4d73f95f 2145
aef9ec39
RD
2146 return -ENOMEM;
2147}
2148
c9b03c1a
BVA
2149static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2150{
2151 uint64_t T_tr_ns, max_compl_time_ms;
2152 uint32_t rq_tmo_jiffies;
2153
2154 /*
2155 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2156 * table 91), both the QP timeout and the retry count have to be set
 2157 * for RC QPs during the RTR to RTS transition.
2158 */
2159 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2160 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2161
2162 /*
2163 * Set target->rq_tmo_jiffies to one second more than the largest time
2164 * it can take before an error completion is generated. See also
2165 * C9-140..142 in the IBTA spec for more information about how to
2166 * convert the QP Local ACK Timeout value to nanoseconds.
2167 */
2168 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2169 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2170 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2171 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
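	/*
	 * Worked example (illustrative values only): with timeout = 14 and
	 * retry_cnt = 7, T_tr = 4096 ns * 2^14 ~= 67 ms, the worst-case
	 * completion time is 7 * 4 * 67 ms ~= 1.9 s, and rq_tmo_jiffies
	 * then corresponds to roughly 2.9 s once the extra second is added.
	 */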
2172
2173 return rq_tmo_jiffies;
2174}
2175
961e0be8 2176static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2177 const struct srp_login_rsp *lrsp,
509c07bc 2178 struct srp_rdma_ch *ch)
961e0be8 2179{
509c07bc 2180 struct srp_target_port *target = ch->target;
961e0be8
DD
2181 struct ib_qp_attr *qp_attr = NULL;
2182 int attr_mask = 0;
2183 int ret;
2184 int i;
2185
2186 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2187 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2188 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2189
2190 /*
2191 * Reserve credits for task management so we don't
2192 * bounce requests back to the SCSI mid-layer.
2193 */
2194 target->scsi_host->can_queue
509c07bc 2195 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2196 target->scsi_host->can_queue);
4d73f95f
BVA
2197 target->scsi_host->cmd_per_lun
2198 = min_t(int, target->scsi_host->can_queue,
2199 target->scsi_host->cmd_per_lun);
961e0be8
DD
2200 } else {
2201 shost_printk(KERN_WARNING, target->scsi_host,
2202 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2203 ret = -ECONNRESET;
2204 goto error;
2205 }
2206
509c07bc
BVA
2207 if (!ch->rx_ring) {
2208 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2209 if (ret)
2210 goto error;
2211 }
2212
2213 ret = -ENOMEM;
2214 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2215 if (!qp_attr)
2216 goto error;
2217
2218 qp_attr->qp_state = IB_QPS_RTR;
2219 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2220 if (ret)
2221 goto error_free;
2222
509c07bc 2223 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2224 if (ret)
2225 goto error_free;
2226
4d73f95f 2227 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2228 struct srp_iu *iu = ch->rx_ring[i];
2229
2230 ret = srp_post_recv(ch, iu);
961e0be8
DD
2231 if (ret)
2232 goto error_free;
2233 }
2234
2235 qp_attr->qp_state = IB_QPS_RTS;
2236 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2237 if (ret)
2238 goto error_free;
2239
c9b03c1a
BVA
2240 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2241
509c07bc 2242 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2243 if (ret)
2244 goto error_free;
2245
2246 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2247
2248error_free:
2249 kfree(qp_attr);
2250
2251error:
509c07bc 2252 ch->status = ret;
961e0be8
DD
2253}
2254
aef9ec39
RD
2255static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2256 struct ib_cm_event *event,
509c07bc 2257 struct srp_rdma_ch *ch)
aef9ec39 2258{
509c07bc 2259 struct srp_target_port *target = ch->target;
7aa54bd7 2260 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2261 struct ib_class_port_info *cpi;
2262 int opcode;
2263
2264 switch (event->param.rej_rcvd.reason) {
2265 case IB_CM_REJ_PORT_CM_REDIRECT:
2266 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2267 ch->path.dlid = cpi->redirect_lid;
2268 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2269 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2270 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2271
509c07bc 2272 ch->status = ch->path.dlid ?
aef9ec39
RD
2273 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2274 break;
2275
2276 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2277 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2278 /*
2279 * Topspin/Cisco SRP gateways incorrectly send
2280 * reject reason code 25 when they mean 24
2281 * (port redirect).
2282 */
509c07bc 2283 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2284 event->param.rej_rcvd.ari, 16);
2285
7aa54bd7
DD
2286 shost_printk(KERN_DEBUG, shost,
2287 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2288 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2289 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2290
509c07bc 2291 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2292 } else {
7aa54bd7
DD
2293 shost_printk(KERN_WARNING, shost,
2294 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2295 ch->status = -ECONNRESET;
aef9ec39
RD
2296 }
2297 break;
2298
2299 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2300 shost_printk(KERN_WARNING, shost,
2301 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2302 ch->status = -ECONNRESET;
aef9ec39
RD
2303 break;
2304
2305 case IB_CM_REJ_CONSUMER_DEFINED:
2306 opcode = *(u8 *) event->private_data;
2307 if (opcode == SRP_LOGIN_REJ) {
2308 struct srp_login_rej *rej = event->private_data;
2309 u32 reason = be32_to_cpu(rej->reason);
2310
2311 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2312 shost_printk(KERN_WARNING, shost,
2313 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2314 else
e7ffde01
BVA
2315 shost_printk(KERN_WARNING, shost, PFX
2316 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2317 target->sgid.raw,
2318 target->orig_dgid.raw, reason);
aef9ec39 2319 } else
7aa54bd7
DD
2320 shost_printk(KERN_WARNING, shost,
2321 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2322 " opcode 0x%02x\n", opcode);
509c07bc 2323 ch->status = -ECONNRESET;
aef9ec39
RD
2324 break;
2325
9fe4bcf4
DD
2326 case IB_CM_REJ_STALE_CONN:
2327 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2328 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2329 break;
2330
aef9ec39 2331 default:
7aa54bd7
DD
2332 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2333 event->param.rej_rcvd.reason);
509c07bc 2334 ch->status = -ECONNRESET;
aef9ec39
RD
2335 }
2336}
2337
2338static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2339{
509c07bc
BVA
2340 struct srp_rdma_ch *ch = cm_id->context;
2341 struct srp_target_port *target = ch->target;
aef9ec39 2342 int comp = 0;
aef9ec39
RD
2343
2344 switch (event->event) {
2345 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2346 shost_printk(KERN_DEBUG, target->scsi_host,
2347 PFX "Sending CM REQ failed\n");
aef9ec39 2348 comp = 1;
509c07bc 2349 ch->status = -ECONNRESET;
aef9ec39
RD
2350 break;
2351
2352 case IB_CM_REP_RECEIVED:
2353 comp = 1;
509c07bc 2354 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2355 break;
2356
2357 case IB_CM_REJ_RECEIVED:
7aa54bd7 2358 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2359 comp = 1;
2360
509c07bc 2361 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2362 break;
2363
b7ac4ab4 2364 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2365 shost_printk(KERN_WARNING, target->scsi_host,
2366 PFX "DREQ received - connection closed\n");
c014c8cd 2367 ch->connected = false;
b7ac4ab4 2368 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2369 shost_printk(KERN_ERR, target->scsi_host,
2370 PFX "Sending CM DREP failed\n");
c1120f89 2371 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2372 break;
2373
2374 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2375 shost_printk(KERN_ERR, target->scsi_host,
2376 PFX "connection closed\n");
ac72d766 2377 comp = 1;
aef9ec39 2378
509c07bc 2379 ch->status = 0;
aef9ec39
RD
2380 break;
2381
b7ac4ab4
IR
2382 case IB_CM_MRA_RECEIVED:
2383 case IB_CM_DREQ_ERROR:
2384 case IB_CM_DREP_RECEIVED:
2385 break;
2386
aef9ec39 2387 default:
7aa54bd7
DD
2388 shost_printk(KERN_WARNING, target->scsi_host,
2389 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2390 break;
2391 }
2392
2393 if (comp)
509c07bc 2394 complete(&ch->done);
aef9ec39 2395
aef9ec39
RD
2396 return 0;
2397}
2398
71444b97
JW
2399/**
 2400 * srp_change_queue_depth - set the device queue depth
2401 * @sdev: scsi device struct
2402 * @qdepth: requested queue depth
71444b97
JW
2403 *
2404 * Returns queue depth.
2405 */
2406static int
db5ed4df 2407srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2408{
c40ecc12 2409 if (!sdev->tagged_supported)
1e6f2416 2410 qdepth = 1;
db5ed4df 2411 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2412}
2413
985aa495
BVA
2414static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2415 u8 func)
aef9ec39 2416{
509c07bc 2417 struct srp_target_port *target = ch->target;
a95cadb9 2418 struct srp_rport *rport = target->rport;
19081f31 2419 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2420 struct srp_iu *iu;
2421 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2422
c014c8cd 2423 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2424 return -1;
2425
509c07bc 2426 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2427
a95cadb9 2428 /*
509c07bc 2429 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2430 * invoked while a task management function is being sent.
2431 */
2432 mutex_lock(&rport->mutex);
509c07bc
BVA
2433 spin_lock_irq(&ch->lock);
2434 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2435 spin_unlock_irq(&ch->lock);
76c75b25 2436
a95cadb9
BVA
2437 if (!iu) {
2438 mutex_unlock(&rport->mutex);
2439
76c75b25 2440 return -1;
a95cadb9 2441 }
aef9ec39 2442
19081f31
DD
2443 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2444 DMA_TO_DEVICE);
aef9ec39
RD
2445 tsk_mgmt = iu->buf;
2446 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2447
2448 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2449 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2450 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2451 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2452 tsk_mgmt->task_tag = req_tag;
aef9ec39 2453
19081f31
DD
2454 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2455 DMA_TO_DEVICE);
509c07bc
BVA
2456 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2457 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2458 mutex_unlock(&rport->mutex);
2459
76c75b25
BVA
2460 return -1;
2461 }
a95cadb9 2462 mutex_unlock(&rport->mutex);
d945e1df 2463
509c07bc 2464 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2465 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2466 return -1;
aef9ec39 2467
d945e1df 2468 return 0;
d945e1df
RD
2469}
2470
aef9ec39
RD
2471static int srp_abort(struct scsi_cmnd *scmnd)
2472{
d945e1df 2473 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2474 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2475 u32 tag;
d92c0da7 2476 u16 ch_idx;
509c07bc 2477 struct srp_rdma_ch *ch;
086f44f5 2478 int ret;
d945e1df 2479
7aa54bd7 2480 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2481
d92c0da7 2482 if (!req)
99b6697a 2483 return SUCCESS;
77f2c1a4 2484 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2485 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2486 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2487 return SUCCESS;
2488 ch = &target->ch[ch_idx];
2489 if (!srp_claim_req(ch, req, NULL, scmnd))
2490 return SUCCESS;
2491 shost_printk(KERN_ERR, target->scsi_host,
2492 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2493 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2494 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2495 ret = SUCCESS;
ed9b2264 2496 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2497 ret = FAST_IO_FAIL;
086f44f5
BVA
2498 else
2499 ret = FAILED;
509c07bc 2500 srp_free_req(ch, req, scmnd, 0);
22032991 2501 scmnd->result = DID_ABORT << 16;
d8536670 2502 scmnd->scsi_done(scmnd);
d945e1df 2503
086f44f5 2504 return ret;
aef9ec39
RD
2505}
2506
2507static int srp_reset_device(struct scsi_cmnd *scmnd)
2508{
d945e1df 2509 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2510 struct srp_rdma_ch *ch;
536ae14e 2511 int i;
d945e1df 2512
7aa54bd7 2513 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2514
d92c0da7 2515 ch = &target->ch[0];
509c07bc 2516 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2517 SRP_TSK_LUN_RESET))
d945e1df 2518 return FAILED;
509c07bc 2519 if (ch->tsk_mgmt_status)
d945e1df
RD
2520 return FAILED;
2521
d92c0da7
BVA
2522 for (i = 0; i < target->ch_count; i++) {
2523 ch = &target->ch[i];
2524 for (i = 0; i < target->req_ring_size; ++i) {
2525 struct srp_request *req = &ch->req_ring[i];
509c07bc 2526
d92c0da7
BVA
2527 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2528 }
536ae14e 2529 }
d945e1df 2530
d945e1df 2531 return SUCCESS;
aef9ec39
RD
2532}
2533
2534static int srp_reset_host(struct scsi_cmnd *scmnd)
2535{
2536 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2537
7aa54bd7 2538 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2539
ed9b2264 2540 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2541}
2542
c9b03c1a
BVA
2543static int srp_slave_configure(struct scsi_device *sdev)
2544{
2545 struct Scsi_Host *shost = sdev->host;
2546 struct srp_target_port *target = host_to_target(shost);
2547 struct request_queue *q = sdev->request_queue;
2548 unsigned long timeout;
2549
2550 if (sdev->type == TYPE_DISK) {
2551 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2552 blk_queue_rq_timeout(q, timeout);
2553 }
2554
2555 return 0;
2556}
2557
ee959b00
TJ
2558static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2559 char *buf)
6ecb0c84 2560{
ee959b00 2561 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2562
45c37cad 2563 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2564}
2565
ee959b00
TJ
2566static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2567 char *buf)
6ecb0c84 2568{
ee959b00 2569 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2570
45c37cad 2571 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2572}
2573
ee959b00
TJ
2574static ssize_t show_service_id(struct device *dev,
2575 struct device_attribute *attr, char *buf)
6ecb0c84 2576{
ee959b00 2577 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2578
45c37cad 2579 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2580}
2581
ee959b00
TJ
2582static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2583 char *buf)
6ecb0c84 2584{
ee959b00 2585 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2586
747fe000 2587 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2588}
2589
848b3082
BVA
2590static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2591 char *buf)
2592{
2593 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2594
747fe000 2595 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2596}
2597
ee959b00
TJ
2598static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2599 char *buf)
6ecb0c84 2600{
ee959b00 2601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2602 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2603
509c07bc 2604 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2605}
2606
ee959b00
TJ
2607static ssize_t show_orig_dgid(struct device *dev,
2608 struct device_attribute *attr, char *buf)
3633b3d0 2609{
ee959b00 2610 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2611
747fe000 2612 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2613}
2614
89de7486
BVA
2615static ssize_t show_req_lim(struct device *dev,
2616 struct device_attribute *attr, char *buf)
2617{
2618 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2619 struct srp_rdma_ch *ch;
2620 int i, req_lim = INT_MAX;
89de7486 2621
d92c0da7
BVA
2622 for (i = 0; i < target->ch_count; i++) {
2623 ch = &target->ch[i];
2624 req_lim = min(req_lim, ch->req_lim);
2625 }
2626 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2627}
2628
ee959b00
TJ
2629static ssize_t show_zero_req_lim(struct device *dev,
2630 struct device_attribute *attr, char *buf)
6bfa24fa 2631{
ee959b00 2632 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2633
6bfa24fa
RD
2634 return sprintf(buf, "%d\n", target->zero_req_lim);
2635}
2636
ee959b00
TJ
2637static ssize_t show_local_ib_port(struct device *dev,
2638 struct device_attribute *attr, char *buf)
ded7f1a1 2639{
ee959b00 2640 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2641
2642 return sprintf(buf, "%d\n", target->srp_host->port);
2643}
2644
ee959b00
TJ
2645static ssize_t show_local_ib_device(struct device *dev,
2646 struct device_attribute *attr, char *buf)
ded7f1a1 2647{
ee959b00 2648 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2649
05321937 2650 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2651}
2652
d92c0da7
BVA
2653static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2654 char *buf)
2655{
2656 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2657
2658 return sprintf(buf, "%d\n", target->ch_count);
2659}
2660
4b5e5f41
BVA
2661static ssize_t show_comp_vector(struct device *dev,
2662 struct device_attribute *attr, char *buf)
2663{
2664 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2665
2666 return sprintf(buf, "%d\n", target->comp_vector);
2667}
2668
7bb312e4
VP
2669static ssize_t show_tl_retry_count(struct device *dev,
2670 struct device_attribute *attr, char *buf)
2671{
2672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2673
2674 return sprintf(buf, "%d\n", target->tl_retry_count);
2675}
2676
49248644
DD
2677static ssize_t show_cmd_sg_entries(struct device *dev,
2678 struct device_attribute *attr, char *buf)
2679{
2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681
2682 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2683}
2684
c07d424d
DD
2685static ssize_t show_allow_ext_sg(struct device *dev,
2686 struct device_attribute *attr, char *buf)
2687{
2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689
2690 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2691}
2692
ee959b00
TJ
2693static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2694static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2695static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2696static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2697static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2698static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2699static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2700static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2701static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2702static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2703static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2704static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2705static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2706static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2707static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2708static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2709
2710static struct device_attribute *srp_host_attrs[] = {
2711 &dev_attr_id_ext,
2712 &dev_attr_ioc_guid,
2713 &dev_attr_service_id,
2714 &dev_attr_pkey,
848b3082 2715 &dev_attr_sgid,
ee959b00
TJ
2716 &dev_attr_dgid,
2717 &dev_attr_orig_dgid,
89de7486 2718 &dev_attr_req_lim,
ee959b00
TJ
2719 &dev_attr_zero_req_lim,
2720 &dev_attr_local_ib_port,
2721 &dev_attr_local_ib_device,
d92c0da7 2722 &dev_attr_ch_count,
4b5e5f41 2723 &dev_attr_comp_vector,
7bb312e4 2724 &dev_attr_tl_retry_count,
49248644 2725 &dev_attr_cmd_sg_entries,
c07d424d 2726 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2727 NULL
2728};
2729
aef9ec39
RD
2730static struct scsi_host_template srp_template = {
2731 .module = THIS_MODULE,
b7f008fd
RD
2732 .name = "InfiniBand SRP initiator",
2733 .proc_name = DRV_NAME,
c9b03c1a 2734 .slave_configure = srp_slave_configure,
aef9ec39
RD
2735 .info = srp_target_info,
2736 .queuecommand = srp_queuecommand,
71444b97 2737 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2738 .eh_abort_handler = srp_abort,
2739 .eh_device_reset_handler = srp_reset_device,
2740 .eh_host_reset_handler = srp_reset_host,
2742c1da 2741 .skip_settle_delay = true,
49248644 2742 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2743 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2744 .this_id = -1,
4d73f95f 2745 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2746 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4
BVA
2747 .shost_attrs = srp_host_attrs,
2748 .use_blk_tags = 1,
c40ecc12 2749 .track_queue_depth = 1,
aef9ec39
RD
2750};
2751
34aa654e
BVA
2752static int srp_sdev_count(struct Scsi_Host *host)
2753{
2754 struct scsi_device *sdev;
2755 int c = 0;
2756
2757 shost_for_each_device(sdev, host)
2758 c++;
2759
2760 return c;
2761}
2762
bc44bd1d
BVA
2763/*
2764 * Return values:
2765 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2766 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2767 * removal has been scheduled.
2768 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2769 */
aef9ec39
RD
2770static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2771{
3236822b
FT
2772 struct srp_rport_identifiers ids;
2773 struct srp_rport *rport;
2774
34aa654e 2775 target->state = SRP_TARGET_SCANNING;
aef9ec39 2776 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2777 be64_to_cpu(target->id_ext));
aef9ec39 2778
05321937 2779 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2780 return -ENODEV;
2781
3236822b
FT
2782 memcpy(ids.port_id, &target->id_ext, 8);
2783 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2784 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2785 rport = srp_rport_add(target->scsi_host, &ids);
2786 if (IS_ERR(rport)) {
2787 scsi_remove_host(target->scsi_host);
2788 return PTR_ERR(rport);
2789 }
2790
dc1bdbd9 2791 rport->lld_data = target;
9dd69a60 2792 target->rport = rport;
dc1bdbd9 2793
b3589fd4 2794 spin_lock(&host->target_lock);
aef9ec39 2795 list_add_tail(&target->list, &host->target_list);
b3589fd4 2796 spin_unlock(&host->target_lock);
aef9ec39 2797
aef9ec39 2798 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2799 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2800
c014c8cd
BVA
2801 if (srp_connected_ch(target) < target->ch_count ||
2802 target->qp_in_error) {
34aa654e
BVA
2803 shost_printk(KERN_INFO, target->scsi_host,
2804 PFX "SCSI scan failed - removing SCSI host\n");
2805 srp_queue_remove_work(target);
2806 goto out;
2807 }
2808
2809 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2810 dev_name(&target->scsi_host->shost_gendev),
2811 srp_sdev_count(target->scsi_host));
2812
2813 spin_lock_irq(&target->lock);
2814 if (target->state == SRP_TARGET_SCANNING)
2815 target->state = SRP_TARGET_LIVE;
2816 spin_unlock_irq(&target->lock);
2817
2818out:
aef9ec39
RD
2819 return 0;
2820}
2821
ee959b00 2822static void srp_release_dev(struct device *dev)
aef9ec39
RD
2823{
2824 struct srp_host *host =
ee959b00 2825 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2826
2827 complete(&host->released);
2828}
2829
2830static struct class srp_class = {
2831 .name = "infiniband_srp",
ee959b00 2832 .dev_release = srp_release_dev
aef9ec39
RD
2833};
2834
96fc248a
BVA
2835/**
2836 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2837 * @host: SRP host.
2838 * @target: SRP target port.
96fc248a
BVA
2839 */
2840static bool srp_conn_unique(struct srp_host *host,
2841 struct srp_target_port *target)
2842{
2843 struct srp_target_port *t;
2844 bool ret = false;
2845
2846 if (target->state == SRP_TARGET_REMOVED)
2847 goto out;
2848
2849 ret = true;
2850
2851 spin_lock(&host->target_lock);
2852 list_for_each_entry(t, &host->target_list, list) {
2853 if (t != target &&
2854 target->id_ext == t->id_ext &&
2855 target->ioc_guid == t->ioc_guid &&
2856 target->initiator_ext == t->initiator_ext) {
2857 ret = false;
2858 break;
2859 }
2860 }
2861 spin_unlock(&host->target_lock);
2862
2863out:
2864 return ret;
2865}
2866
aef9ec39
RD
2867/*
2868 * Target ports are added by writing
2869 *
2870 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2871 * pkey=<P_Key>,service_id=<service ID>
2872 *
2873 * to the add_target sysfs attribute.
2874 */
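/*
 * Example write (all identifiers below are made up; the sysfs directory
 * name depends on the local HCA name and port):
 *
 *   echo "id_ext=200100a0b8cafe00,ioc_guid=00a0b8cafe000001,dgid=fe800000000000000002c90300a0b8ca,pkey=ffff,service_id=200100a0b8cafe00" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */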
2875enum {
2876 SRP_OPT_ERR = 0,
2877 SRP_OPT_ID_EXT = 1 << 0,
2878 SRP_OPT_IOC_GUID = 1 << 1,
2879 SRP_OPT_DGID = 1 << 2,
2880 SRP_OPT_PKEY = 1 << 3,
2881 SRP_OPT_SERVICE_ID = 1 << 4,
2882 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2883 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2884 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2885 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2886 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2887 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2888 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2889 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2890 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2891 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2892 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2893 SRP_OPT_IOC_GUID |
2894 SRP_OPT_DGID |
2895 SRP_OPT_PKEY |
2896 SRP_OPT_SERVICE_ID),
2897};
2898
a447c093 2899static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2900 { SRP_OPT_ID_EXT, "id_ext=%s" },
2901 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2902 { SRP_OPT_DGID, "dgid=%s" },
2903 { SRP_OPT_PKEY, "pkey=%x" },
2904 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2905 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2906 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2907 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2908 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2909 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2910 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2911 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2912 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2913 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2914 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2915 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2916};
2917
2918static int srp_parse_options(const char *buf, struct srp_target_port *target)
2919{
2920 char *options, *sep_opt;
2921 char *p;
2922 char dgid[3];
2923 substring_t args[MAX_OPT_ARGS];
2924 int opt_mask = 0;
2925 int token;
2926 int ret = -EINVAL;
2927 int i;
2928
2929 options = kstrdup(buf, GFP_KERNEL);
2930 if (!options)
2931 return -ENOMEM;
2932
2933 sep_opt = options;
7dcf9c19 2934 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2935 if (!*p)
2936 continue;
2937
2938 token = match_token(p, srp_opt_tokens, args);
2939 opt_mask |= token;
2940
2941 switch (token) {
2942 case SRP_OPT_ID_EXT:
2943 p = match_strdup(args);
a20f3a6d
IR
2944 if (!p) {
2945 ret = -ENOMEM;
2946 goto out;
2947 }
aef9ec39
RD
2948 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2949 kfree(p);
2950 break;
2951
2952 case SRP_OPT_IOC_GUID:
2953 p = match_strdup(args);
a20f3a6d
IR
2954 if (!p) {
2955 ret = -ENOMEM;
2956 goto out;
2957 }
aef9ec39
RD
2958 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2959 kfree(p);
2960 break;
2961
2962 case SRP_OPT_DGID:
2963 p = match_strdup(args);
a20f3a6d
IR
2964 if (!p) {
2965 ret = -ENOMEM;
2966 goto out;
2967 }
aef9ec39 2968 if (strlen(p) != 32) {
e0bda7d8 2969 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 2970 kfree(p);
aef9ec39
RD
2971 goto out;
2972 }
2973
2974 for (i = 0; i < 16; ++i) {
747fe000
BVA
2975 strlcpy(dgid, p + i * 2, sizeof(dgid));
2976 if (sscanf(dgid, "%hhx",
2977 &target->orig_dgid.raw[i]) < 1) {
2978 ret = -EINVAL;
2979 kfree(p);
2980 goto out;
2981 }
aef9ec39 2982 }
bf17c1c7 2983 kfree(p);
aef9ec39
RD
2984 break;
2985
2986 case SRP_OPT_PKEY:
2987 if (match_hex(args, &token)) {
e0bda7d8 2988 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
2989 goto out;
2990 }
747fe000 2991 target->pkey = cpu_to_be16(token);
aef9ec39
RD
2992 break;
2993
2994 case SRP_OPT_SERVICE_ID:
2995 p = match_strdup(args);
a20f3a6d
IR
2996 if (!p) {
2997 ret = -ENOMEM;
2998 goto out;
2999 }
aef9ec39
RD
3000 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3001 kfree(p);
3002 break;
3003
3004 case SRP_OPT_MAX_SECT:
3005 if (match_int(args, &token)) {
e0bda7d8 3006 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3007 goto out;
3008 }
3009 target->scsi_host->max_sectors = token;
3010 break;
3011
4d73f95f
BVA
3012 case SRP_OPT_QUEUE_SIZE:
3013 if (match_int(args, &token) || token < 1) {
3014 pr_warn("bad queue_size parameter '%s'\n", p);
3015 goto out;
3016 }
3017 target->scsi_host->can_queue = token;
3018 target->queue_size = token + SRP_RSP_SQ_SIZE +
3019 SRP_TSK_MGMT_SQ_SIZE;
3020 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3021 target->scsi_host->cmd_per_lun = token;
3022 break;
3023
52fb2b50 3024 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3025 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3026 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3027 p);
52fb2b50
VP
3028 goto out;
3029 }
4d73f95f 3030 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3031 break;
3032
0c0450db
R
3033 case SRP_OPT_IO_CLASS:
3034 if (match_hex(args, &token)) {
e0bda7d8 3035 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3036 goto out;
3037 }
3038 if (token != SRP_REV10_IB_IO_CLASS &&
3039 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3040 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3041 token, SRP_REV10_IB_IO_CLASS,
3042 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3043 goto out;
3044 }
3045 target->io_class = token;
3046 break;
3047
01cb9bcb
IR
3048 case SRP_OPT_INITIATOR_EXT:
3049 p = match_strdup(args);
a20f3a6d
IR
3050 if (!p) {
3051 ret = -ENOMEM;
3052 goto out;
3053 }
01cb9bcb
IR
3054 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3055 kfree(p);
3056 break;
3057
49248644
DD
3058 case SRP_OPT_CMD_SG_ENTRIES:
3059 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3060 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3061 p);
49248644
DD
3062 goto out;
3063 }
3064 target->cmd_sg_cnt = token;
3065 break;
3066
c07d424d
DD
3067 case SRP_OPT_ALLOW_EXT_SG:
3068 if (match_int(args, &token)) {
e0bda7d8 3069 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3070 goto out;
3071 }
3072 target->allow_ext_sg = !!token;
3073 break;
3074
3075 case SRP_OPT_SG_TABLESIZE:
3076 if (match_int(args, &token) || token < 1 ||
3077 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3078 pr_warn("bad max sg_tablesize parameter '%s'\n",
3079 p);
c07d424d
DD
3080 goto out;
3081 }
3082 target->sg_tablesize = token;
3083 break;
3084
4b5e5f41
BVA
3085 case SRP_OPT_COMP_VECTOR:
3086 if (match_int(args, &token) || token < 0) {
3087 pr_warn("bad comp_vector parameter '%s'\n", p);
3088 goto out;
3089 }
3090 target->comp_vector = token;
3091 break;
3092
7bb312e4
VP
3093 case SRP_OPT_TL_RETRY_COUNT:
3094 if (match_int(args, &token) || token < 2 || token > 7) {
3095 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3096 p);
3097 goto out;
3098 }
3099 target->tl_retry_count = token;
3100 break;
3101
aef9ec39 3102 default:
e0bda7d8
BVA
3103 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3104 p);
aef9ec39
RD
3105 goto out;
3106 }
3107 }
3108
3109 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3110 ret = 0;
3111 else
3112 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3113 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3114 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3115 pr_warn("target creation request is missing parameter '%s'\n",
3116 srp_opt_tokens[i].pattern);
aef9ec39 3117
4d73f95f
BVA
3118 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3119 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3120 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3121 target->scsi_host->cmd_per_lun,
3122 target->scsi_host->can_queue);
3123
aef9ec39
RD
3124out:
3125 kfree(options);
3126 return ret;
3127}
3128
ee959b00
TJ
3129static ssize_t srp_create_target(struct device *dev,
3130 struct device_attribute *attr,
aef9ec39
RD
3131 const char *buf, size_t count)
3132{
3133 struct srp_host *host =
ee959b00 3134 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3135 struct Scsi_Host *target_host;
3136 struct srp_target_port *target;
509c07bc 3137 struct srp_rdma_ch *ch;
d1b4289e
BVA
3138 struct srp_device *srp_dev = host->srp_dev;
3139 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3140 int ret, node_idx, node, cpu, i;
3141 bool multich = false;
aef9ec39
RD
3142
3143 target_host = scsi_host_alloc(&srp_template,
3144 sizeof (struct srp_target_port));
3145 if (!target_host)
3146 return -ENOMEM;
3147
49248644 3148 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3149 target_host->max_channel = 0;
3150 target_host->max_id = 1;
985aa495 3151 target_host->max_lun = -1LL;
3c8edf0e 3152 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3153
aef9ec39 3154 target = host_to_target(target_host);
aef9ec39 3155
49248644
DD
3156 target->io_class = SRP_REV16A_IB_IO_CLASS;
3157 target->scsi_host = target_host;
3158 target->srp_host = host;
e6bf5f48 3159 target->lkey = host->srp_dev->pd->local_dma_lkey;
49248644
DD
3160 target->rkey = host->srp_dev->mr->rkey;
3161 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3162 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3163 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3164 target->tl_retry_count = 7;
4d73f95f 3165 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3166
34aa654e
BVA
3167 /*
 3168 * Prevent the SCSI host from being removed by srp_remove_target()
3169 * before this function returns.
3170 */
3171 scsi_host_get(target->scsi_host);
3172
2d7091bc
BVA
3173 mutex_lock(&host->add_target_mutex);
3174
aef9ec39
RD
3175 ret = srp_parse_options(buf, target);
3176 if (ret)
fb49c8bb 3177 goto out;
aef9ec39 3178
77f2c1a4
BVA
3179 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3180 if (ret)
fb49c8bb 3181 goto out;
77f2c1a4 3182
4d73f95f
BVA
3183 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3184
96fc248a
BVA
3185 if (!srp_conn_unique(target->srp_host, target)) {
3186 shost_printk(KERN_INFO, target->scsi_host,
3187 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3188 be64_to_cpu(target->id_ext),
3189 be64_to_cpu(target->ioc_guid),
3190 be64_to_cpu(target->initiator_ext));
3191 ret = -EEXIST;
fb49c8bb 3192 goto out;
96fc248a
BVA
3193 }
3194
5cfb1782 3195 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3196 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3197 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3198 target->sg_tablesize = target->cmd_sg_cnt;
3199 }
3200
3201 target_host->sg_tablesize = target->sg_tablesize;
3202 target->indirect_size = target->sg_tablesize *
3203 sizeof (struct srp_direct_buf);
49248644
DD
3204 target->max_iu_len = sizeof (struct srp_cmd) +
3205 sizeof (struct srp_indirect_buf) +
3206 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3207
c1120f89 3208 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3209 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3210 spin_lock_init(&target->lock);
747fe000 3211 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66 3212 if (ret)
fb49c8bb 3213 goto out;
aef9ec39 3214
d92c0da7
BVA
3215 ret = -ENOMEM;
3216 target->ch_count = max_t(unsigned, num_online_nodes(),
3217 min(ch_count ? :
3218 min(4 * num_online_nodes(),
3219 ibdev->num_comp_vectors),
3220 num_online_cpus()));
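	/*
	 * Illustration (made-up topology): with ch_count left unset (zero),
	 * two online NUMA nodes, sixteen online CPUs and eight completion
	 * vectors, the expression above evaluates to
	 * max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels.
	 */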
3221 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3222 GFP_KERNEL);
3223 if (!target->ch)
fb49c8bb 3224 goto out;
aef9ec39 3225
d92c0da7
BVA
3226 node_idx = 0;
3227 for_each_online_node(node) {
3228 const int ch_start = (node_idx * target->ch_count /
3229 num_online_nodes());
3230 const int ch_end = ((node_idx + 1) * target->ch_count /
3231 num_online_nodes());
3232 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3233 num_online_nodes() + target->comp_vector)
3234 % ibdev->num_comp_vectors;
3235 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3236 num_online_nodes() + target->comp_vector)
3237 % ibdev->num_comp_vectors;
3238 int cpu_idx = 0;
3239
3240 for_each_online_cpu(cpu) {
3241 if (cpu_to_node(cpu) != node)
3242 continue;
3243 if (ch_start + cpu_idx >= ch_end)
3244 continue;
3245 ch = &target->ch[ch_start + cpu_idx];
3246 ch->target = target;
3247 ch->comp_vector = cv_start == cv_end ? cv_start :
3248 cv_start + cpu_idx % (cv_end - cv_start);
3249 spin_lock_init(&ch->lock);
3250 INIT_LIST_HEAD(&ch->free_tx);
3251 ret = srp_new_cm_id(ch);
3252 if (ret)
3253 goto err_disconnect;
aef9ec39 3254
d92c0da7
BVA
3255 ret = srp_create_ch_ib(ch);
3256 if (ret)
3257 goto err_disconnect;
3258
3259 ret = srp_alloc_req_data(ch);
3260 if (ret)
3261 goto err_disconnect;
3262
3263 ret = srp_connect_ch(ch, multich);
3264 if (ret) {
3265 shost_printk(KERN_ERR, target->scsi_host,
3266 PFX "Connection %d/%d failed\n",
3267 ch_start + cpu_idx,
3268 target->ch_count);
3269 if (node_idx == 0 && cpu_idx == 0) {
3270 goto err_disconnect;
3271 } else {
3272 srp_free_ch_ib(target, ch);
3273 srp_free_req_data(target, ch);
3274 target->ch_count = ch - target->ch;
c257ea6f 3275 goto connected;
d92c0da7
BVA
3276 }
3277 }
3278
3279 multich = true;
3280 cpu_idx++;
3281 }
3282 node_idx++;
aef9ec39
RD
3283 }
3284
c257ea6f 3285connected:
d92c0da7
BVA
3286 target->scsi_host->nr_hw_queues = target->ch_count;
3287
aef9ec39
RD
3288 ret = srp_add_target(host, target);
3289 if (ret)
3290 goto err_disconnect;
3291
34aa654e
BVA
3292 if (target->state != SRP_TARGET_REMOVED) {
3293 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3294 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3295 be64_to_cpu(target->id_ext),
3296 be64_to_cpu(target->ioc_guid),
747fe000 3297 be16_to_cpu(target->pkey),
34aa654e 3298 be64_to_cpu(target->service_id),
747fe000 3299 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3300 }
e7ffde01 3301
2d7091bc
BVA
3302 ret = count;
3303
3304out:
3305 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3306
3307 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3308 if (ret < 0)
3309 scsi_host_put(target->scsi_host);
34aa654e 3310
2d7091bc 3311 return ret;
aef9ec39
RD
3312
3313err_disconnect:
3314 srp_disconnect_target(target);
3315
d92c0da7
BVA
3316 for (i = 0; i < target->ch_count; i++) {
3317 ch = &target->ch[i];
3318 srp_free_ch_ib(target, ch);
3319 srp_free_req_data(target, ch);
3320 }
aef9ec39 3321
d92c0da7 3322 kfree(target->ch);
2d7091bc 3323 goto out;
aef9ec39
RD
3324}
3325
ee959b00 3326static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3327
ee959b00
TJ
3328static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3329 char *buf)
aef9ec39 3330{
ee959b00 3331 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3332
05321937 3333 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3334}
3335
ee959b00 3336static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3337
ee959b00
TJ
3338static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3339 char *buf)
aef9ec39 3340{
ee959b00 3341 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3342
3343 return sprintf(buf, "%d\n", host->port);
3344}
3345
ee959b00 3346static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3347
f5358a17 3348static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3349{
3350 struct srp_host *host;
3351
3352 host = kzalloc(sizeof *host, GFP_KERNEL);
3353 if (!host)
3354 return NULL;
3355
3356 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3357 spin_lock_init(&host->target_lock);
aef9ec39 3358 init_completion(&host->released);
2d7091bc 3359 mutex_init(&host->add_target_mutex);
05321937 3360 host->srp_dev = device;
aef9ec39
RD
3361 host->port = port;
3362
ee959b00
TJ
3363 host->dev.class = &srp_class;
3364 host->dev.parent = device->dev->dma_device;
d927e38c 3365 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3366
ee959b00 3367 if (device_register(&host->dev))
f5358a17 3368 goto free_host;
ee959b00 3369 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3370 goto err_class;
ee959b00 3371 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3372 goto err_class;
ee959b00 3373 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3374 goto err_class;
3375
3376 return host;
3377
3378err_class:
ee959b00 3379 device_unregister(&host->dev);
aef9ec39 3380
f5358a17 3381free_host:
aef9ec39
RD
3382 kfree(host);
3383
3384 return NULL;
3385}
3386
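/*
 * Called once per InfiniBand device, either when this module registers its
 * IB client or when a new HCA shows up later.
 */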
3387static void srp_add_one(struct ib_device *device)
3388{
f5358a17
RD
3389 struct srp_device *srp_dev;
3390 struct ib_device_attr *dev_attr;
aef9ec39 3391 struct srp_host *host;
4139032b 3392 int mr_page_shift, p;
52ede08f 3393 u64 max_pages_per_mr;
aef9ec39 3394
f5358a17
RD
3395 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3396 if (!dev_attr)
cf311cd4 3397 return;
aef9ec39 3398
f5358a17 3399 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3400 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3401 goto free_attr;
3402 }
3403
3404 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3405 if (!srp_dev)
3406 goto free_attr;
3407
d1b4289e
BVA
3408 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3409 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3410 srp_dev->has_fr = (dev_attr->device_cap_flags &
3411 IB_DEVICE_MEM_MGT_EXTENSIONS);
3412 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3413 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3414
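	/*
	 * Pick the memory registration mode: fast registration (FR) when
	 * the HCA supports it and either FMR is unavailable or prefer_fr
	 * is set, otherwise FMR when available.
	 */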
3415 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3416 (!srp_dev->has_fmr || prefer_fr));
002f1567 3417 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
d1b4289e 3418
f5358a17
RD
3419 /*
3420 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3421 * minimum of 4096 bytes. We're unlikely to build large sglists
3422 * out of smaller entries.
f5358a17 3423 */
52ede08f
BVA
3424 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3425 srp_dev->mr_page_size = 1 << mr_page_shift;
3426 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3427 max_pages_per_mr = dev_attr->max_mr_size;
3428 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3429 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3430 max_pages_per_mr);
5cfb1782
BVA
3431 if (srp_dev->use_fast_reg) {
3432 srp_dev->max_pages_per_mr =
3433 min_t(u32, srp_dev->max_pages_per_mr,
3434 dev_attr->max_fast_reg_page_list_len);
3435 }
52ede08f
BVA
3436 srp_dev->mr_max_size = srp_dev->mr_page_size *
3437 srp_dev->max_pages_per_mr;
5cfb1782 3438 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3439 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3440 dev_attr->max_fast_reg_page_list_len,
52ede08f 3441 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
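	/*
	 * Worked example (hypothetical HCA): page_size_cap = 0x1000 means
	 * only 4 KiB pages are supported, so mr_page_shift = max(12,
	 * ffs(0x1000) - 1) = 12 and mr_page_size = 4096.  A max_mr_size of
	 * 256 MiB then yields 65536 pages, which is clamped to
	 * SRP_MAX_PAGES_PER_MR and, for fast registration, to
	 * max_fast_reg_page_list_len.
	 */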
f5358a17
RD
3442
3443 INIT_LIST_HEAD(&srp_dev->dev_list);
3444
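	/*
	 * A single protection domain and a single DMA MR with local write
	 * and remote read/write access are allocated per HCA and shared by
	 * every port (and every target logged in through it).
	 */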
3445 srp_dev->dev = device;
3446 srp_dev->pd = ib_alloc_pd(device);
3447 if (IS_ERR(srp_dev->pd))
3448 goto free_dev;
3449
3450 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3451 IB_ACCESS_LOCAL_WRITE |
3452 IB_ACCESS_REMOTE_READ |
3453 IB_ACCESS_REMOTE_WRITE);
3454 if (IS_ERR(srp_dev->mr))
3455 goto err_pd;
3456
4139032b 3457 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3458 host = srp_add_port(srp_dev, p);
aef9ec39 3459 if (host)
f5358a17 3460 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3461 }
3462
f5358a17
RD
3463 ib_set_client_data(device, &srp_client, srp_dev);
3464
3465 goto free_attr;
3466
3467err_pd:
3468 ib_dealloc_pd(srp_dev->pd);
3469
3470free_dev:
3471 kfree(srp_dev);
3472
3473free_attr:
3474 kfree(dev_attr);
aef9ec39
RD
3475}
3476
7c1eb45a 3477static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3478{
f5358a17 3479 struct srp_device *srp_dev;
aef9ec39 3480 struct srp_host *host, *tmp_host;
ef6c49d8 3481 struct srp_target_port *target;
aef9ec39 3482
7c1eb45a 3483 srp_dev = client_data;
1fe0cb84
DB
3484 if (!srp_dev)
3485 return;
aef9ec39 3486
f5358a17 3487 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3488 device_unregister(&host->dev);
aef9ec39
RD
3489 /*
3490 * Wait for the sysfs entry to go away, so that no new
3491 * target ports can be created.
3492 */
3493 wait_for_completion(&host->released);
3494
3495 /*
ef6c49d8 3496 * Remove all target ports.
aef9ec39 3497 */
b3589fd4 3498 spin_lock(&host->target_lock);
ef6c49d8
BVA
3499 list_for_each_entry(target, &host->target_list, list)
3500 srp_queue_remove_work(target);
b3589fd4 3501 spin_unlock(&host->target_lock);
aef9ec39
RD
3502
3503 /*
bcc05910 3504 * Wait for tl_err and target port removal tasks.
aef9ec39 3505 */
ef6c49d8 3506 flush_workqueue(system_long_wq);
bcc05910 3507 flush_workqueue(srp_remove_wq);
aef9ec39 3508
aef9ec39
RD
3509 kfree(host);
3510 }
3511
f5358a17
RD
3512 ib_dereg_mr(srp_dev->mr);
3513 ib_dealloc_pd(srp_dev->pd);
3514
3515 kfree(srp_dev);
aef9ec39
RD
3516}
3517
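/*
 * Callbacks and tunables handed to the SCSI SRP transport class: rport
 * state handling, the reconnect/terminate hooks and the fast_io_fail and
 * dev_loss timeouts used after a transport error.
 */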
3236822b 3518static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3519 .has_rport_state = true,
3520 .reset_timer_if_blocked = true,
a95cadb9 3521 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3522 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3523 .dev_loss_tmo = &srp_dev_loss_tmo,
3524 .reconnect = srp_rport_reconnect,
dc1bdbd9 3525 .rport_delete = srp_rport_delete,
ed9b2264 3526 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3527};
3528
aef9ec39
RD
3529static int __init srp_init_module(void)
3530{
3531 int ret;
3532
dcb4cb85 3533 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3534
49248644 3535 if (srp_sg_tablesize) {
e0bda7d8 3536 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3537 if (!cmd_sg_entries)
3538 cmd_sg_entries = srp_sg_tablesize;
3539 }
3540
3541 if (!cmd_sg_entries)
3542 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3543
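	/*
	 * The SRP_CMD request encodes the data-in/data-out descriptor
	 * counts in single bytes, so at most 255 gather/scatter entries
	 * can be expressed per command.
	 */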
3544 if (cmd_sg_entries > 255) {
e0bda7d8 3545 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3546 cmd_sg_entries = 255;
1e89a194
DD
3547 }
3548
c07d424d
DD
3549 if (!indirect_sg_entries)
3550 indirect_sg_entries = cmd_sg_entries;
3551 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3552 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3553 cmd_sg_entries);
c07d424d
DD
3554 indirect_sg_entries = cmd_sg_entries;
3555 }
3556
bcc05910 3557 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3558 if (!srp_remove_wq) {
3559 ret = -ENOMEM;
bcc05910
BVA
3560 goto out;
3561 }
3562
3563 ret = -ENOMEM;
3236822b
FT
3564 ib_srp_transport_template =
3565 srp_attach_transport(&ib_srp_transport_functions);
3566 if (!ib_srp_transport_template)
bcc05910 3567 goto destroy_wq;
3236822b 3568
aef9ec39
RD
3569 ret = class_register(&srp_class);
3570 if (ret) {
e0bda7d8 3571 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3572 goto release_tr;
aef9ec39
RD
3573 }
3574
c1a0b23b
MT
3575 ib_sa_register_client(&srp_sa_client);
3576
aef9ec39
RD
3577 ret = ib_register_client(&srp_client);
3578 if (ret) {
e0bda7d8 3579 pr_err("couldn't register IB client\n");
bcc05910 3580 goto unreg_sa;
aef9ec39
RD
3581 }
3582
bcc05910
BVA
3583out:
3584 return ret;
3585
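	/*
	 * Error unwinding: undo the setup steps in reverse order and jump
	 * back to the common exit above.
	 */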
3586unreg_sa:
3587 ib_sa_unregister_client(&srp_sa_client);
3588 class_unregister(&srp_class);
3589
3590release_tr:
3591 srp_release_transport(ib_srp_transport_template);
3592
3593destroy_wq:
3594 destroy_workqueue(srp_remove_wq);
3595 goto out;
aef9ec39
RD
3596}
3597
3598static void __exit srp_cleanup_module(void)
3599{
3600 ib_unregister_client(&srp_client);
c1a0b23b 3601 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3602 class_unregister(&srp_class);
3236822b 3603 srp_release_transport(ib_srp_transport_template);
bcc05910 3604 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3605}
3606
3607module_init(srp_init_module);
3608module_exit(srp_cleanup_module);