/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

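/*
 * Illustrative usage of the parameters above (example values, not from this
 * file): most of the options are read-only after load, while the three
 * timeout values registered through srp_tmo_ops can typically also be
 * changed at runtime through sysfs, e.g.:
 *
 *   modprobe ib_srp cmd_sg_entries=255 ch_count=2
 *   echo 5  > /sys/module/ib_srp/parameters/reconnect_delay
 *   echo 30 > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * The exact sysfs paths depend on how the kernel and module are built.
 */
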
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

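/*
 * Note on the setter above: srp_tmo_set() always revalidates the full
 * (reconnect_delay, fast_io_fail_tmo, dev_loss_tmo) triple via
 * srp_tmo_valid(), substituting the freshly parsed value only for the
 * parameter being written. A runtime change therefore cannot leave the
 * three timeouts in a mutually inconsistent state.
 */
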
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

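/*
 * Sizing note (derived from the code in this file): srp_alloc_fr_pool()
 * below creates the pool with target->mr_pool_size descriptors, each able
 * to map up to dev->max_pages_per_mr pages, and srp_map_finish_fr() fails
 * with -ENOMEM once a single command would need more than
 * target->mr_per_cmd of these descriptors.
 */
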
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

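/*
 * Sizing note for srp_create_ch_ib() above: the send queue and send CQ hold
 * m * queue_size entries, where m is 1 without memory registration and
 * 1 + 2 * mr_per_cmd with fast registration, since each command may
 * additionally post up to mr_per_cmd IB_WR_REG_MR and IB_WR_LOCAL_INV work
 * requests (see srp_map_finish_fr() and srp_unmap_data() below). The
 * receive side reserves one extra entry for ib_drain_rq().
 */
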
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

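/*
 * For illustration (layout implied by the branches above; the values are
 * made up): with the default, rev. 16a style I/O class the 16-byte
 * initiator port ID is laid out as
 *
 *   bytes 0..7  : initiator_ext            (e.g. 0x0000000000000001)
 *   bytes 8..15 : port GID interface id    (e.g. 0x0002c90300a1b2c3)
 *
 * whereas SRP_REV10_IB_IO_CLASS targets expect the two halves swapped.
 */
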
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

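/*
 * Login flow, summarizing the loop above: srp_connect_ch() keeps resending
 * the SRP_LOGIN_REQ as long as the target answers with a port or LID/QP
 * redirect REJ, repeating the path lookup for port redirects, and gives up
 * on any other status. A positive status that is not a recognized redirect
 * is mapped to -ENODEV for the caller.
 */
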
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     pd->unsafe_global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * at which to start within the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     pd->unsafe_global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}

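/*
 * Fast registration flow, as implemented above: each mapping bumps the
 * descriptor's rkey with ib_inc_rkey() so that stale remote accesses with
 * an old key are caught, builds the page list with ib_map_mr_sg(), and then
 * posts an IB_WR_REG_MR work request on the channel's QP. The matching
 * IB_WR_LOCAL_INV is posted later from srp_unmap_data().
 */
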
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;

		if (state->npages == dev->max_pages_per_mr ||
		    (state->npages > 0 && offset != 0)) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the end of the MR is not on a page boundary then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if ((dma_addr & ~dev->mr_page_mask) != 0)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}

static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	return 0;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->pd->unsafe_global_rkey);
	}

	return 0;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}

static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	else if (dev->use_fmr)
		for (i = 0; i < state->nmdesc; i++)
			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}

77269cdf
BVA
1599/**
1600 * srp_map_data() - map SCSI data buffer onto an SRP request
1601 * @scmnd: SCSI command to map
1602 * @ch: SRP RDMA channel
1603 * @req: SRP request
1604 *
1605 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1606 * mapping failed.
1607 */
509c07bc 1608static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1609 struct srp_request *req)
1610{
509c07bc 1611 struct srp_target_port *target = ch->target;
5f071777 1612 struct ib_pd *pd = target->pd;
76bc1e1d 1613 struct scatterlist *scat;
aef9ec39 1614 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1615 int len, nents, count, ret;
85507bcc
RC
1616 struct srp_device *dev;
1617 struct ib_device *ibdev;
8f26c9ff
DD
1618 struct srp_map_state state;
1619 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1620 u32 idb_len, table_len;
1621 __be32 idb_rkey;
8f26c9ff 1622 u8 fmt;
aef9ec39 1623
bb350d1d 1624 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1625 return sizeof (struct srp_cmd);
1626
1627 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1628 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1629 shost_printk(KERN_WARNING, target->scsi_host,
1630 PFX "Unhandled data direction %d\n",
1631 scmnd->sc_data_direction);
aef9ec39
RD
1632 return -EINVAL;
1633 }
1634
bb350d1d
FT
1635 nents = scsi_sg_count(scmnd);
1636 scat = scsi_sglist(scmnd);
aef9ec39 1637
05321937 1638 dev = target->srp_host->srp_dev;
85507bcc
RC
1639 ibdev = dev->dev;
1640
1641 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1642 if (unlikely(count == 0))
1643 return -EIO;
f5358a17
RD
1644
1645 fmt = SRP_DATA_DESC_DIRECT;
1646 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1647
5f071777 1648 if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
f5358a17
RD
1649 /*
1650 * The midlayer only generated a single gather/scatter
1651 * entry, or DMA mapping coalesced everything to a
1652 * single entry. So a direct descriptor along with
1653 * the DMA MR suffices.
1654 */
cf368713 1655 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1656
85507bcc 1657 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
5f071777 1658 buf->key = cpu_to_be32(pd->unsafe_global_rkey);
85507bcc 1659 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1660
52ede08f 1661 req->nmdesc = 0;
8f26c9ff
DD
1662 goto map_complete;
1663 }
1664
5cfb1782
BVA
1665 /*
1666 * We have more than one scatter/gather entry, so build our indirect
1667 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1668 */
1669 indirect_hdr = (void *) cmd->add_data;
1670
c07d424d
DD
1671 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1672 target->indirect_size, DMA_TO_DEVICE);
1673
8f26c9ff 1674 memset(&state, 0, sizeof(state));
9edba790 1675 state.desc = req->indirect_desc;
26630e8a 1676 if (dev->use_fast_reg)
e012f363 1677 ret = srp_map_sg_fr(&state, ch, req, scat, count);
26630e8a 1678 else if (dev->use_fmr)
e012f363 1679 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
26630e8a 1680 else
e012f363
BVA
1681 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1682 req->nmdesc = state.nmdesc;
1683 if (ret < 0)
1684 goto unmap;
cf368713 1685
509c5f33
BVA
1686 {
1687 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1688 "Memory mapping consistency check");
1a1faf7a 1689 if (DYNAMIC_DEBUG_BRANCH(ddm))
509c5f33
BVA
1690 srp_check_mapping(&state, ch, req, scat, count);
1691 }
cf368713 1692
c07d424d
DD
1693 /* We've mapped the request, now pull as much of the indirect
1694 * descriptor table as we can into the command buffer. If this
1695 * target is not using an external indirect table, we are
1696 * guaranteed to fit into the command, as the SCSI layer won't
1697 * give us more S/G entries than we allow.
8f26c9ff 1698 */
8f26c9ff 1699 if (state.ndesc == 1) {
5cfb1782
BVA
1700 /*
1701 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1702 * so use a direct descriptor.
1703 */
1704 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1705
c07d424d 1706 *buf = req->indirect_desc[0];
8f26c9ff 1707 goto map_complete;
aef9ec39
RD
1708 }
1709
c07d424d
DD
1710 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1711 !target->allow_ext_sg)) {
1712 shost_printk(KERN_ERR, target->scsi_host,
1713 "Could not fit S/G list into SRP_CMD\n");
e012f363
BVA
1714 ret = -EIO;
1715 goto unmap;
c07d424d
DD
1716 }
1717
1718 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1719 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1720 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1721
1722 fmt = SRP_DATA_DESC_INDIRECT;
1723 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1724 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1725
c07d424d
DD
1726 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1727 count * sizeof (struct srp_direct_buf));
8f26c9ff 1728
5f071777 1729 if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
330179f2
BVA
1730 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1731 idb_len, &idb_rkey);
1732 if (ret < 0)
e012f363 1733 goto unmap;
330179f2
BVA
1734 req->nmdesc++;
1735 } else {
5f071777 1736 idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
330179f2
BVA
1737 }
1738
c07d424d 1739 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1740 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1741 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1742 indirect_hdr->len = cpu_to_be32(state.total_len);
1743
1744 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1745 cmd->data_out_desc_cnt = count;
8f26c9ff 1746 else
c07d424d
DD
1747 cmd->data_in_desc_cnt = count;
1748
1749 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1750 DMA_TO_DEVICE);
8f26c9ff
DD
1751
1752map_complete:
aef9ec39
RD
1753 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1754 cmd->buf_fmt = fmt << 4;
1755 else
1756 cmd->buf_fmt = fmt;
1757
aef9ec39 1758 return len;
e012f363
BVA
1759
1760unmap:
1761 srp_unmap_data(scmnd, ch, req);
ffc548bb
BVA
1762 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1763 ret = -E2BIG;
e012f363 1764 return ret;
aef9ec39
RD
1765}
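/*
 * A minimal sketch of the descriptor-format decision made above, assuming
 * only the mapped SG count and the PD flags matter; the helper name is
 * illustrative and not a symbol in this driver:
 *
 *	static bool srp_would_use_direct_desc(int count, struct ib_pd *pd)
 *	{
 *		return count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY);
 *	}
 *
 * One mapped entry plus the unsafe global rkey is described by a single
 * srp_direct_buf; every other case builds an indirect table of
 * srp_direct_buf entries in cmd->add_data, described by a srp_indirect_buf
 * header.
 */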
1766
76c75b25
BVA
1767/*
1768 * Return an IU and possible credit to the free pool
1769 */
509c07bc 1770static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1771 enum srp_iu_type iu_type)
1772{
1773 unsigned long flags;
1774
509c07bc
BVA
1775 spin_lock_irqsave(&ch->lock, flags);
1776 list_add(&iu->list, &ch->free_tx);
76c75b25 1777 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1778 ++ch->req_lim;
1779 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1780}
1781
05a1d750 1782/*
509c07bc 1783 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1784 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1785 *
1786 * Note:
1787 * An upper limit for the number of allocated information units for each
1788 * request type is:
1789 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1790 * more than Scsi_Host.can_queue requests.
1791 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1792 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1793 * one unanswered SRP request to an initiator.
1794 */
509c07bc 1795static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1796 enum srp_iu_type iu_type)
1797{
509c07bc 1798 struct srp_target_port *target = ch->target;
05a1d750
DD
1799 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1800 struct srp_iu *iu;
1801
1dc7b1f1 1802 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 1803
509c07bc 1804 if (list_empty(&ch->free_tx))
05a1d750
DD
1805 return NULL;
1806
1807 /* Initiator responses to target requests do not consume credits */
76c75b25 1808 if (iu_type != SRP_IU_RSP) {
509c07bc 1809 if (ch->req_lim <= rsv) {
76c75b25
BVA
1810 ++target->zero_req_lim;
1811 return NULL;
1812 }
1813
509c07bc 1814 --ch->req_lim;
05a1d750
DD
1815 }
1816
509c07bc 1817 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1818 list_del(&iu->list);
05a1d750
DD
1819 return iu;
1820}
1821
1dc7b1f1
CH
1822static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1823{
1824 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1825 struct srp_rdma_ch *ch = cq->cq_context;
1826
1827 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1828 srp_handle_qp_err(cq, wc, "SEND");
1829 return;
1830 }
1831
1832 list_add(&iu->list, &ch->free_tx);
1833}
1834
509c07bc 1835static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1836{
509c07bc 1837 struct srp_target_port *target = ch->target;
05a1d750
DD
1838 struct ib_sge list;
1839 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1840
1841 list.addr = iu->dma;
1842 list.length = len;
9af76271 1843 list.lkey = target->lkey;
05a1d750 1844
1dc7b1f1
CH
1845 iu->cqe.done = srp_send_done;
1846
05a1d750 1847 wr.next = NULL;
1dc7b1f1 1848 wr.wr_cqe = &iu->cqe;
05a1d750
DD
1849 wr.sg_list = &list;
1850 wr.num_sge = 1;
1851 wr.opcode = IB_WR_SEND;
1852 wr.send_flags = IB_SEND_SIGNALED;
1853
509c07bc 1854 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1855}
1856
509c07bc 1857static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1858{
509c07bc 1859 struct srp_target_port *target = ch->target;
c996bb47 1860 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1861 struct ib_sge list;
c996bb47
BVA
1862
1863 list.addr = iu->dma;
1864 list.length = iu->size;
9af76271 1865 list.lkey = target->lkey;
c996bb47 1866
1dc7b1f1
CH
1867 iu->cqe.done = srp_recv_done;
1868
c996bb47 1869 wr.next = NULL;
1dc7b1f1 1870 wr.wr_cqe = &iu->cqe;
c996bb47
BVA
1871 wr.sg_list = &list;
1872 wr.num_sge = 1;
1873
509c07bc 1874 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1875}
1876
509c07bc 1877static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1878{
509c07bc 1879 struct srp_target_port *target = ch->target;
aef9ec39
RD
1880 struct srp_request *req;
1881 struct scsi_cmnd *scmnd;
1882 unsigned long flags;
aef9ec39 1883
aef9ec39 1884 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1885 spin_lock_irqsave(&ch->lock, flags);
1886 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1887 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1888
509c07bc 1889 ch->tsk_mgmt_status = -1;
f8b6e31e 1890 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1891 ch->tsk_mgmt_status = rsp->data[3];
1892 complete(&ch->tsk_mgmt_done);
aef9ec39 1893 } else {
77f2c1a4
BVA
1894 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1895 if (scmnd) {
1896 req = (void *)scmnd->host_scribble;
1897 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1898 }
22032991 1899 if (!scmnd) {
7aa54bd7 1900 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1901 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1902 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1903
509c07bc
BVA
1904 spin_lock_irqsave(&ch->lock, flags);
1905 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1906 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1907
1908 return;
1909 }
aef9ec39
RD
1910 scmnd->result = rsp->status;
1911
1912 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1913 memcpy(scmnd->sense_buffer, rsp->data +
1914 be32_to_cpu(rsp->resp_data_len),
1915 min_t(int, be32_to_cpu(rsp->sense_data_len),
1916 SCSI_SENSE_BUFFERSIZE));
1917 }
1918
e714531a 1919 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1920 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1921 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1922 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1923 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1924 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1925 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1926 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1927
509c07bc 1928 srp_free_req(ch, req, scmnd,
22032991
BVA
1929 be32_to_cpu(rsp->req_lim_delta));
1930
f8b6e31e
DD
1931 scmnd->host_scribble = NULL;
1932 scmnd->scsi_done(scmnd);
aef9ec39 1933 }
aef9ec39
RD
1934}
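/*
 * Residual handling example (hypothetical values): a READ that transfers
 * 512 bytes less than requested arrives with SRP_RSP_FLAG_DIUNDER set and
 * data_in_res_cnt == 512, so the code above calls scsi_set_resid(scmnd, 512);
 * an overrun (DIOVER/DOOVER) is reported as a negative residual instead.
 */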
1935
509c07bc 1936static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1937 void *rsp, int len)
1938{
509c07bc 1939 struct srp_target_port *target = ch->target;
76c75b25 1940 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1941 unsigned long flags;
1942 struct srp_iu *iu;
76c75b25 1943 int err;
bb12588a 1944
509c07bc
BVA
1945 spin_lock_irqsave(&ch->lock, flags);
1946 ch->req_lim += req_delta;
1947 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1948 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1949
bb12588a
DD
1950 if (!iu) {
1951 shost_printk(KERN_ERR, target->scsi_host, PFX
1952 "no IU available to send response\n");
76c75b25 1953 return 1;
bb12588a
DD
1954 }
1955
1956 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1957 memcpy(iu->buf, rsp, len);
1958 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1959
509c07bc 1960 err = srp_post_send(ch, iu, len);
76c75b25 1961 if (err) {
bb12588a
DD
1962 shost_printk(KERN_ERR, target->scsi_host, PFX
1963 "unable to post response: %d\n", err);
509c07bc 1964 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1965 }
bb12588a 1966
bb12588a
DD
1967 return err;
1968}
1969
509c07bc 1970static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1971 struct srp_cred_req *req)
1972{
1973 struct srp_cred_rsp rsp = {
1974 .opcode = SRP_CRED_RSP,
1975 .tag = req->tag,
1976 };
1977 s32 delta = be32_to_cpu(req->req_lim_delta);
1978
509c07bc
BVA
1979 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1980 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1981 "problems processing SRP_CRED_REQ\n");
1982}
1983
509c07bc 1984static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1985 struct srp_aer_req *req)
1986{
509c07bc 1987 struct srp_target_port *target = ch->target;
bb12588a
DD
1988 struct srp_aer_rsp rsp = {
1989 .opcode = SRP_AER_RSP,
1990 .tag = req->tag,
1991 };
1992 s32 delta = be32_to_cpu(req->req_lim_delta);
1993
1994 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1995 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1996
509c07bc 1997 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1998 shost_printk(KERN_ERR, target->scsi_host, PFX
1999 "problems processing SRP_AER_REQ\n");
2000}
2001
1dc7b1f1 2002static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 2003{
1dc7b1f1
CH
2004 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2005 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 2006 struct srp_target_port *target = ch->target;
dcb4cb85 2007 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 2008 int res;
aef9ec39
RD
2009 u8 opcode;
2010
1dc7b1f1
CH
2011 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2012 srp_handle_qp_err(cq, wc, "RECV");
2013 return;
2014 }
2015
509c07bc 2016 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2017 DMA_FROM_DEVICE);
aef9ec39
RD
2018
2019 opcode = *(u8 *) iu->buf;
2020
2021 if (0) {
7aa54bd7
DD
2022 shost_printk(KERN_ERR, target->scsi_host,
2023 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
2024 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2025 iu->buf, wc->byte_len, true);
aef9ec39
RD
2026 }
2027
2028 switch (opcode) {
2029 case SRP_RSP:
509c07bc 2030 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
2031 break;
2032
bb12588a 2033 case SRP_CRED_REQ:
509c07bc 2034 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
2035 break;
2036
2037 case SRP_AER_REQ:
509c07bc 2038 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
2039 break;
2040
aef9ec39
RD
2041 case SRP_T_LOGOUT:
2042 /* XXX Handle target logout */
7aa54bd7
DD
2043 shost_printk(KERN_WARNING, target->scsi_host,
2044 PFX "Got target logout request\n");
aef9ec39
RD
2045 break;
2046
2047 default:
7aa54bd7
DD
2048 shost_printk(KERN_WARNING, target->scsi_host,
2049 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
2050 break;
2051 }
2052
509c07bc 2053 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2054 DMA_FROM_DEVICE);
c996bb47 2055
509c07bc 2056 res = srp_post_recv(ch, iu);
c996bb47
BVA
2057 if (res != 0)
2058 shost_printk(KERN_ERR, target->scsi_host,
2059 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
2060}
2061
c1120f89
BVA
2062/**
2063 * srp_tl_err_work() - handle a transport layer error
af24663b 2064 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
2065 *
2066 * Note: This function may get invoked before the rport has been created,
2067 * hence the target->rport test.
2068 */
2069static void srp_tl_err_work(struct work_struct *work)
2070{
2071 struct srp_target_port *target;
2072
2073 target = container_of(work, struct srp_target_port, tl_err_work);
2074 if (target->rport)
2075 srp_start_tl_fail_timers(target->rport);
2076}
2077
1dc7b1f1
CH
2078static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2079 const char *opname)
948d1e88 2080{
1dc7b1f1 2081 struct srp_rdma_ch *ch = cq->cq_context;
7dad6b2e
BVA
2082 struct srp_target_port *target = ch->target;
2083
c014c8cd 2084 if (ch->connected && !target->qp_in_error) {
1dc7b1f1
CH
2085 shost_printk(KERN_ERR, target->scsi_host,
2086 PFX "failed %s status %s (%d) for CQE %p\n",
2087 opname, ib_wc_status_msg(wc->status), wc->status,
2088 wc->wr_cqe);
c1120f89 2089 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2090 }
948d1e88
BVA
2091 target->qp_in_error = true;
2092}
2093
76c75b25 2094static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2095{
76c75b25 2096 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2097 struct srp_rport *rport = target->rport;
509c07bc 2098 struct srp_rdma_ch *ch;
aef9ec39
RD
2099 struct srp_request *req;
2100 struct srp_iu *iu;
2101 struct srp_cmd *cmd;
85507bcc 2102 struct ib_device *dev;
76c75b25 2103 unsigned long flags;
77f2c1a4
BVA
2104 u32 tag;
2105 u16 idx;
d1b4289e 2106 int len, ret;
a95cadb9
BVA
2107 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2108
2109 /*
2110 * The SCSI EH thread is the only context from which srp_queuecommand()
2111 * can get invoked for blocked devices (SDEV_BLOCK /
2112 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2113 * locking the rport mutex if invoked from inside the SCSI EH.
2114 */
2115 if (in_scsi_eh)
2116 mutex_lock(&rport->mutex);
aef9ec39 2117
d1b4289e
BVA
2118 scmnd->result = srp_chkready(target->rport);
2119 if (unlikely(scmnd->result))
2120 goto err;
2ce19e72 2121
77f2c1a4
BVA
2122 WARN_ON_ONCE(scmnd->request->tag < 0);
2123 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2124 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2125 idx = blk_mq_unique_tag_to_tag(tag);
2126 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2127 dev_name(&shost->shost_gendev), tag, idx,
2128 target->req_ring_size);
509c07bc
BVA
2129
2130 spin_lock_irqsave(&ch->lock, flags);
2131 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2132 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2133
77f2c1a4
BVA
2134 if (!iu)
2135 goto err;
2136
2137 req = &ch->req_ring[idx];
05321937 2138 dev = target->srp_host->srp_dev->dev;
49248644 2139 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2140 DMA_TO_DEVICE);
aef9ec39 2141
f8b6e31e 2142 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2143
2144 cmd = iu->buf;
2145 memset(cmd, 0, sizeof *cmd);
2146
2147 cmd->opcode = SRP_CMD;
985aa495 2148 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2149 cmd->tag = tag;
aef9ec39
RD
2150 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2151
aef9ec39
RD
2152 req->scmnd = scmnd;
2153 req->cmd = iu;
aef9ec39 2154
509c07bc 2155 len = srp_map_data(scmnd, ch, req);
aef9ec39 2156 if (len < 0) {
7aa54bd7 2157 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2158 PFX "Failed to map data (%d)\n", len);
2159 /*
2160 * If we ran out of memory descriptors (-ENOMEM) because an
2161 * application is queuing many requests with more than
52ede08f 2162 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2163 * to reduce queue depth temporarily.
2164 */
2165 scmnd->result = len == -ENOMEM ?
2166 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2167 goto err_iu;
aef9ec39
RD
2168 }
2169
49248644 2170 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2171 DMA_TO_DEVICE);
aef9ec39 2172
509c07bc 2173 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2174 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2175 goto err_unmap;
2176 }
2177
d1b4289e
BVA
2178 ret = 0;
2179
a95cadb9
BVA
2180unlock_rport:
2181 if (in_scsi_eh)
2182 mutex_unlock(&rport->mutex);
2183
d1b4289e 2184 return ret;
aef9ec39
RD
2185
2186err_unmap:
509c07bc 2187 srp_unmap_data(scmnd, ch, req);
aef9ec39 2188
76c75b25 2189err_iu:
509c07bc 2190 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2191
024ca901
BVA
2192 /*
 2193 * Prevent the loops that iterate over the request ring from
 2194 * encountering a dangling SCSI command pointer.
2195 */
2196 req->scmnd = NULL;
2197
d1b4289e
BVA
2198err:
2199 if (scmnd->result) {
2200 scmnd->scsi_done(scmnd);
2201 ret = 0;
2202 } else {
2203 ret = SCSI_MLQUEUE_HOST_BUSY;
2204 }
a95cadb9 2205
d1b4289e 2206 goto unlock_rport;
aef9ec39
RD
2207}
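/*
 * Tag decomposition example (hypothetical value): assuming the usual blk-mq
 * layout in which blk_mq_unique_tag() places the hardware queue number in
 * the upper 16 bits and the per-queue tag in the lower 16 bits, a unique tag
 * of 0x0002000a makes srp_queuecommand() pick channel target->ch[2] and
 * request slot ch->req_ring[0xa].
 */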
2208
4d73f95f
BVA
2209/*
2210 * Note: the resources allocated in this function are freed in
509c07bc 2211 * srp_free_ch_ib().
4d73f95f 2212 */
509c07bc 2213static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2214{
509c07bc 2215 struct srp_target_port *target = ch->target;
aef9ec39
RD
2216 int i;
2217
509c07bc
BVA
2218 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2219 GFP_KERNEL);
2220 if (!ch->rx_ring)
4d73f95f 2221 goto err_no_ring;
509c07bc
BVA
2222 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2223 GFP_KERNEL);
2224 if (!ch->tx_ring)
4d73f95f
BVA
2225 goto err_no_ring;
2226
2227 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2228 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2229 ch->max_ti_iu_len,
2230 GFP_KERNEL, DMA_FROM_DEVICE);
2231 if (!ch->rx_ring[i])
aef9ec39
RD
2232 goto err;
2233 }
2234
4d73f95f 2235 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2236 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2237 target->max_iu_len,
2238 GFP_KERNEL, DMA_TO_DEVICE);
2239 if (!ch->tx_ring[i])
aef9ec39 2240 goto err;
dcb4cb85 2241
509c07bc 2242 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2243 }
2244
2245 return 0;
2246
2247err:
4d73f95f 2248 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2249 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2250 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2251 }
2252
4d73f95f
BVA
2253
2254err_no_ring:
509c07bc
BVA
2255 kfree(ch->tx_ring);
2256 ch->tx_ring = NULL;
2257 kfree(ch->rx_ring);
2258 ch->rx_ring = NULL;
4d73f95f 2259
aef9ec39
RD
2260 return -ENOMEM;
2261}
2262
c9b03c1a
BVA
2263static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2264{
2265 uint64_t T_tr_ns, max_compl_time_ms;
2266 uint32_t rq_tmo_jiffies;
2267
2268 /*
2269 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2270 * table 91), both the QP timeout and the retry count have to be set
2271 * for RC QP's during the RTR to RTS transition.
2272 */
2273 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2274 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2275
2276 /*
2277 * Set target->rq_tmo_jiffies to one second more than the largest time
2278 * it can take before an error completion is generated. See also
2279 * C9-140..142 in the IBTA spec for more information about how to
2280 * convert the QP Local ACK Timeout value to nanoseconds.
2281 */
2282 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2283 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2284 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2285 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2286
2287 return rq_tmo_jiffies;
2288}
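/*
 * Worked example with hypothetical values: a local ACK timeout of 19 gives
 * T_tr_ns = 4096 * 2^19 ns ~= 2.15 s; with retry_cnt = 7 the worst-case
 * completion time is 7 * 4 * 2.15 s ~= 60 s, so rq_tmo_jiffies ends up
 * corresponding to roughly 61 seconds.
 */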
2289
961e0be8 2290static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2291 const struct srp_login_rsp *lrsp,
509c07bc 2292 struct srp_rdma_ch *ch)
961e0be8 2293{
509c07bc 2294 struct srp_target_port *target = ch->target;
961e0be8
DD
2295 struct ib_qp_attr *qp_attr = NULL;
2296 int attr_mask = 0;
2297 int ret;
2298 int i;
2299
2300 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2301 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2302 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2303
2304 /*
2305 * Reserve credits for task management so we don't
2306 * bounce requests back to the SCSI mid-layer.
2307 */
2308 target->scsi_host->can_queue
509c07bc 2309 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2310 target->scsi_host->can_queue);
4d73f95f
BVA
2311 target->scsi_host->cmd_per_lun
2312 = min_t(int, target->scsi_host->can_queue,
2313 target->scsi_host->cmd_per_lun);
961e0be8
DD
2314 } else {
2315 shost_printk(KERN_WARNING, target->scsi_host,
2316 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2317 ret = -ECONNRESET;
2318 goto error;
2319 }
2320
509c07bc
BVA
2321 if (!ch->rx_ring) {
2322 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2323 if (ret)
2324 goto error;
2325 }
2326
2327 ret = -ENOMEM;
2328 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2329 if (!qp_attr)
2330 goto error;
2331
2332 qp_attr->qp_state = IB_QPS_RTR;
2333 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2334 if (ret)
2335 goto error_free;
2336
509c07bc 2337 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2338 if (ret)
2339 goto error_free;
2340
4d73f95f 2341 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2342 struct srp_iu *iu = ch->rx_ring[i];
2343
2344 ret = srp_post_recv(ch, iu);
961e0be8
DD
2345 if (ret)
2346 goto error_free;
2347 }
2348
2349 qp_attr->qp_state = IB_QPS_RTS;
2350 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2351 if (ret)
2352 goto error_free;
2353
c9b03c1a
BVA
2354 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2355
509c07bc 2356 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2357 if (ret)
2358 goto error_free;
2359
2360 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2361
2362error_free:
2363 kfree(qp_attr);
2364
2365error:
509c07bc 2366 ch->status = ret;
961e0be8
DD
2367}
2368
aef9ec39
RD
2369static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2370 struct ib_cm_event *event,
509c07bc 2371 struct srp_rdma_ch *ch)
aef9ec39 2372{
509c07bc 2373 struct srp_target_port *target = ch->target;
7aa54bd7 2374 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2375 struct ib_class_port_info *cpi;
2376 int opcode;
2377
2378 switch (event->param.rej_rcvd.reason) {
2379 case IB_CM_REJ_PORT_CM_REDIRECT:
2380 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2381 ch->path.dlid = cpi->redirect_lid;
2382 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2383 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2384 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2385
509c07bc 2386 ch->status = ch->path.dlid ?
aef9ec39
RD
2387 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2388 break;
2389
2390 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2391 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2392 /*
2393 * Topspin/Cisco SRP gateways incorrectly send
2394 * reject reason code 25 when they mean 24
2395 * (port redirect).
2396 */
509c07bc 2397 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2398 event->param.rej_rcvd.ari, 16);
2399
7aa54bd7
DD
2400 shost_printk(KERN_DEBUG, shost,
2401 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2402 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2403 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2404
509c07bc 2405 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2406 } else {
7aa54bd7
DD
2407 shost_printk(KERN_WARNING, shost,
2408 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2409 ch->status = -ECONNRESET;
aef9ec39
RD
2410 }
2411 break;
2412
2413 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2414 shost_printk(KERN_WARNING, shost,
2415 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2416 ch->status = -ECONNRESET;
aef9ec39
RD
2417 break;
2418
2419 case IB_CM_REJ_CONSUMER_DEFINED:
2420 opcode = *(u8 *) event->private_data;
2421 if (opcode == SRP_LOGIN_REJ) {
2422 struct srp_login_rej *rej = event->private_data;
2423 u32 reason = be32_to_cpu(rej->reason);
2424
2425 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2426 shost_printk(KERN_WARNING, shost,
2427 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2428 else
e7ffde01
BVA
2429 shost_printk(KERN_WARNING, shost, PFX
2430 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2431 target->sgid.raw,
2432 target->orig_dgid.raw, reason);
aef9ec39 2433 } else
7aa54bd7
DD
2434 shost_printk(KERN_WARNING, shost,
2435 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2436 " opcode 0x%02x\n", opcode);
509c07bc 2437 ch->status = -ECONNRESET;
aef9ec39
RD
2438 break;
2439
9fe4bcf4
DD
2440 case IB_CM_REJ_STALE_CONN:
2441 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2442 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2443 break;
2444
aef9ec39 2445 default:
7aa54bd7
DD
2446 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2447 event->param.rej_rcvd.reason);
509c07bc 2448 ch->status = -ECONNRESET;
aef9ec39
RD
2449 }
2450}
2451
2452static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2453{
509c07bc
BVA
2454 struct srp_rdma_ch *ch = cm_id->context;
2455 struct srp_target_port *target = ch->target;
aef9ec39 2456 int comp = 0;
aef9ec39
RD
2457
2458 switch (event->event) {
2459 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2460 shost_printk(KERN_DEBUG, target->scsi_host,
2461 PFX "Sending CM REQ failed\n");
aef9ec39 2462 comp = 1;
509c07bc 2463 ch->status = -ECONNRESET;
aef9ec39
RD
2464 break;
2465
2466 case IB_CM_REP_RECEIVED:
2467 comp = 1;
509c07bc 2468 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2469 break;
2470
2471 case IB_CM_REJ_RECEIVED:
7aa54bd7 2472 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2473 comp = 1;
2474
509c07bc 2475 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2476 break;
2477
b7ac4ab4 2478 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2479 shost_printk(KERN_WARNING, target->scsi_host,
2480 PFX "DREQ received - connection closed\n");
c014c8cd 2481 ch->connected = false;
b7ac4ab4 2482 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2483 shost_printk(KERN_ERR, target->scsi_host,
2484 PFX "Sending CM DREP failed\n");
c1120f89 2485 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2486 break;
2487
2488 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2489 shost_printk(KERN_ERR, target->scsi_host,
2490 PFX "connection closed\n");
ac72d766 2491 comp = 1;
aef9ec39 2492
509c07bc 2493 ch->status = 0;
aef9ec39
RD
2494 break;
2495
b7ac4ab4
IR
2496 case IB_CM_MRA_RECEIVED:
2497 case IB_CM_DREQ_ERROR:
2498 case IB_CM_DREP_RECEIVED:
2499 break;
2500
aef9ec39 2501 default:
7aa54bd7
DD
2502 shost_printk(KERN_WARNING, target->scsi_host,
2503 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2504 break;
2505 }
2506
2507 if (comp)
509c07bc 2508 complete(&ch->done);
aef9ec39 2509
aef9ec39
RD
2510 return 0;
2511}
2512
71444b97
JW
2513/**
 2514 * srp_change_queue_depth - set the device queue depth
2515 * @sdev: scsi device struct
2516 * @qdepth: requested queue depth
71444b97
JW
2517 *
2518 * Returns queue depth.
2519 */
2520static int
db5ed4df 2521srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2522{
c40ecc12 2523 if (!sdev->tagged_supported)
1e6f2416 2524 qdepth = 1;
db5ed4df 2525 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2526}
2527
985aa495
BVA
2528static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2529 u8 func)
aef9ec39 2530{
509c07bc 2531 struct srp_target_port *target = ch->target;
a95cadb9 2532 struct srp_rport *rport = target->rport;
19081f31 2533 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2534 struct srp_iu *iu;
2535 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2536
c014c8cd 2537 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2538 return -1;
2539
509c07bc 2540 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2541
a95cadb9 2542 /*
509c07bc 2543 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
 2544 * invoked while a task management function is being sent.
2545 */
2546 mutex_lock(&rport->mutex);
509c07bc
BVA
2547 spin_lock_irq(&ch->lock);
2548 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2549 spin_unlock_irq(&ch->lock);
76c75b25 2550
a95cadb9
BVA
2551 if (!iu) {
2552 mutex_unlock(&rport->mutex);
2553
76c75b25 2554 return -1;
a95cadb9 2555 }
aef9ec39 2556
19081f31
DD
2557 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2558 DMA_TO_DEVICE);
aef9ec39
RD
2559 tsk_mgmt = iu->buf;
2560 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2561
2562 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2563 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2564 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2565 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2566 tsk_mgmt->task_tag = req_tag;
aef9ec39 2567
19081f31
DD
2568 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2569 DMA_TO_DEVICE);
509c07bc
BVA
2570 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2571 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2572 mutex_unlock(&rport->mutex);
2573
76c75b25
BVA
2574 return -1;
2575 }
a95cadb9 2576 mutex_unlock(&rport->mutex);
d945e1df 2577
509c07bc 2578 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2579 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2580 return -1;
aef9ec39 2581
d945e1df 2582 return 0;
d945e1df
RD
2583}
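/*
 * Note on the completion path: the SRP_TAG_TSK_MGMT bit set in tsk_mgmt->tag
 * above is what lets srp_process_rsp() tell a task-management response apart
 * from a command response; that path stores the status byte in
 * ch->tsk_mgmt_status and completes ch->tsk_mgmt_done, which this function
 * waits on for up to SRP_ABORT_TIMEOUT_MS.
 */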
2584
aef9ec39
RD
2585static int srp_abort(struct scsi_cmnd *scmnd)
2586{
d945e1df 2587 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2588 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2589 u32 tag;
d92c0da7 2590 u16 ch_idx;
509c07bc 2591 struct srp_rdma_ch *ch;
086f44f5 2592 int ret;
d945e1df 2593
7aa54bd7 2594 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2595
d92c0da7 2596 if (!req)
99b6697a 2597 return SUCCESS;
77f2c1a4 2598 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2599 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2600 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2601 return SUCCESS;
2602 ch = &target->ch[ch_idx];
2603 if (!srp_claim_req(ch, req, NULL, scmnd))
2604 return SUCCESS;
2605 shost_printk(KERN_ERR, target->scsi_host,
2606 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2607 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2608 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2609 ret = SUCCESS;
ed9b2264 2610 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2611 ret = FAST_IO_FAIL;
086f44f5
BVA
2612 else
2613 ret = FAILED;
509c07bc 2614 srp_free_req(ch, req, scmnd, 0);
22032991 2615 scmnd->result = DID_ABORT << 16;
d8536670 2616 scmnd->scsi_done(scmnd);
d945e1df 2617
086f44f5 2618 return ret;
aef9ec39
RD
2619}
2620
2621static int srp_reset_device(struct scsi_cmnd *scmnd)
2622{
d945e1df 2623 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2624 struct srp_rdma_ch *ch;
536ae14e 2625 int i, j;
d945e1df 2626
7aa54bd7 2627 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2628
d92c0da7 2629 ch = &target->ch[0];
509c07bc 2630 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2631 SRP_TSK_LUN_RESET))
d945e1df 2632 return FAILED;
509c07bc 2633 if (ch->tsk_mgmt_status)
d945e1df
RD
2634 return FAILED;
2635
d92c0da7
BVA
2636 for (i = 0; i < target->ch_count; i++) {
2637 ch = &target->ch[i];
 2638 for (j = 0; j < target->req_ring_size; ++j) {
 2639 struct srp_request *req = &ch->req_ring[j];
509c07bc 2640
d92c0da7
BVA
2641 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2642 }
536ae14e 2643 }
d945e1df 2644
d945e1df 2645 return SUCCESS;
aef9ec39
RD
2646}
2647
2648static int srp_reset_host(struct scsi_cmnd *scmnd)
2649{
2650 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2651
7aa54bd7 2652 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2653
ed9b2264 2654 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2655}
2656
509c5f33
BVA
2657static int srp_slave_alloc(struct scsi_device *sdev)
2658{
2659 struct Scsi_Host *shost = sdev->host;
2660 struct srp_target_port *target = host_to_target(shost);
2661 struct srp_device *srp_dev = target->srp_host->srp_dev;
2662 struct ib_device *ibdev = srp_dev->dev;
2663
2664 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
2665 blk_queue_virt_boundary(sdev->request_queue,
2666 ~srp_dev->mr_page_mask);
2667
2668 return 0;
2669}
2670
c9b03c1a
BVA
2671static int srp_slave_configure(struct scsi_device *sdev)
2672{
2673 struct Scsi_Host *shost = sdev->host;
2674 struct srp_target_port *target = host_to_target(shost);
2675 struct request_queue *q = sdev->request_queue;
2676 unsigned long timeout;
2677
2678 if (sdev->type == TYPE_DISK) {
2679 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2680 blk_queue_rq_timeout(q, timeout);
2681 }
2682
2683 return 0;
2684}
2685
ee959b00
TJ
2686static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2687 char *buf)
6ecb0c84 2688{
ee959b00 2689 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2690
45c37cad 2691 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2692}
2693
ee959b00
TJ
2694static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2695 char *buf)
6ecb0c84 2696{
ee959b00 2697 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2698
45c37cad 2699 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2700}
2701
ee959b00
TJ
2702static ssize_t show_service_id(struct device *dev,
2703 struct device_attribute *attr, char *buf)
6ecb0c84 2704{
ee959b00 2705 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2706
45c37cad 2707 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2708}
2709
ee959b00
TJ
2710static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2711 char *buf)
6ecb0c84 2712{
ee959b00 2713 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2714
747fe000 2715 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2716}
2717
848b3082
BVA
2718static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2719 char *buf)
2720{
2721 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2722
747fe000 2723 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2724}
2725
ee959b00
TJ
2726static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2727 char *buf)
6ecb0c84 2728{
ee959b00 2729 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2730 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2731
509c07bc 2732 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2733}
2734
ee959b00
TJ
2735static ssize_t show_orig_dgid(struct device *dev,
2736 struct device_attribute *attr, char *buf)
3633b3d0 2737{
ee959b00 2738 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2739
747fe000 2740 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2741}
2742
89de7486
BVA
2743static ssize_t show_req_lim(struct device *dev,
2744 struct device_attribute *attr, char *buf)
2745{
2746 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2747 struct srp_rdma_ch *ch;
2748 int i, req_lim = INT_MAX;
89de7486 2749
d92c0da7
BVA
2750 for (i = 0; i < target->ch_count; i++) {
2751 ch = &target->ch[i];
2752 req_lim = min(req_lim, ch->req_lim);
2753 }
2754 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2755}
2756
ee959b00
TJ
2757static ssize_t show_zero_req_lim(struct device *dev,
2758 struct device_attribute *attr, char *buf)
6bfa24fa 2759{
ee959b00 2760 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2761
6bfa24fa
RD
2762 return sprintf(buf, "%d\n", target->zero_req_lim);
2763}
2764
ee959b00
TJ
2765static ssize_t show_local_ib_port(struct device *dev,
2766 struct device_attribute *attr, char *buf)
ded7f1a1 2767{
ee959b00 2768 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2769
2770 return sprintf(buf, "%d\n", target->srp_host->port);
2771}
2772
ee959b00
TJ
2773static ssize_t show_local_ib_device(struct device *dev,
2774 struct device_attribute *attr, char *buf)
ded7f1a1 2775{
ee959b00 2776 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2777
05321937 2778 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2779}
2780
d92c0da7
BVA
2781static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2782 char *buf)
2783{
2784 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2785
2786 return sprintf(buf, "%d\n", target->ch_count);
2787}
2788
4b5e5f41
BVA
2789static ssize_t show_comp_vector(struct device *dev,
2790 struct device_attribute *attr, char *buf)
2791{
2792 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2793
2794 return sprintf(buf, "%d\n", target->comp_vector);
2795}
2796
7bb312e4
VP
2797static ssize_t show_tl_retry_count(struct device *dev,
2798 struct device_attribute *attr, char *buf)
2799{
2800 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2801
2802 return sprintf(buf, "%d\n", target->tl_retry_count);
2803}
2804
49248644
DD
2805static ssize_t show_cmd_sg_entries(struct device *dev,
2806 struct device_attribute *attr, char *buf)
2807{
2808 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2809
2810 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2811}
2812
c07d424d
DD
2813static ssize_t show_allow_ext_sg(struct device *dev,
2814 struct device_attribute *attr, char *buf)
2815{
2816 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2817
2818 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2819}
2820
ee959b00
TJ
2821static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2822static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2823static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2824static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2825static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2826static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2827static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2828static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2829static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2830static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2831static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2832static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2833static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2834static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2835static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2836static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2837
2838static struct device_attribute *srp_host_attrs[] = {
2839 &dev_attr_id_ext,
2840 &dev_attr_ioc_guid,
2841 &dev_attr_service_id,
2842 &dev_attr_pkey,
848b3082 2843 &dev_attr_sgid,
ee959b00
TJ
2844 &dev_attr_dgid,
2845 &dev_attr_orig_dgid,
89de7486 2846 &dev_attr_req_lim,
ee959b00
TJ
2847 &dev_attr_zero_req_lim,
2848 &dev_attr_local_ib_port,
2849 &dev_attr_local_ib_device,
d92c0da7 2850 &dev_attr_ch_count,
4b5e5f41 2851 &dev_attr_comp_vector,
7bb312e4 2852 &dev_attr_tl_retry_count,
49248644 2853 &dev_attr_cmd_sg_entries,
c07d424d 2854 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2855 NULL
2856};
2857
aef9ec39
RD
2858static struct scsi_host_template srp_template = {
2859 .module = THIS_MODULE,
b7f008fd
RD
2860 .name = "InfiniBand SRP initiator",
2861 .proc_name = DRV_NAME,
509c5f33 2862 .slave_alloc = srp_slave_alloc,
c9b03c1a 2863 .slave_configure = srp_slave_configure,
aef9ec39
RD
2864 .info = srp_target_info,
2865 .queuecommand = srp_queuecommand,
71444b97 2866 .change_queue_depth = srp_change_queue_depth,
b6a05c82 2867 .eh_timed_out = srp_timed_out,
aef9ec39
RD
2868 .eh_abort_handler = srp_abort,
2869 .eh_device_reset_handler = srp_reset_device,
2870 .eh_host_reset_handler = srp_reset_host,
2742c1da 2871 .skip_settle_delay = true,
49248644 2872 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2873 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2874 .this_id = -1,
4d73f95f 2875 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2876 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4 2877 .shost_attrs = srp_host_attrs,
c40ecc12 2878 .track_queue_depth = 1,
aef9ec39
RD
2879};
2880
34aa654e
BVA
2881static int srp_sdev_count(struct Scsi_Host *host)
2882{
2883 struct scsi_device *sdev;
2884 int c = 0;
2885
2886 shost_for_each_device(sdev, host)
2887 c++;
2888
2889 return c;
2890}
2891
bc44bd1d
BVA
2892/*
2893 * Return values:
2894 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2895 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2896 * removal has been scheduled.
2897 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2898 */
aef9ec39
RD
2899static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2900{
3236822b
FT
2901 struct srp_rport_identifiers ids;
2902 struct srp_rport *rport;
2903
34aa654e 2904 target->state = SRP_TARGET_SCANNING;
aef9ec39 2905 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2906 be64_to_cpu(target->id_ext));
aef9ec39 2907
05321937 2908 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2909 return -ENODEV;
2910
3236822b
FT
2911 memcpy(ids.port_id, &target->id_ext, 8);
2912 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2913 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2914 rport = srp_rport_add(target->scsi_host, &ids);
2915 if (IS_ERR(rport)) {
2916 scsi_remove_host(target->scsi_host);
2917 return PTR_ERR(rport);
2918 }
2919
dc1bdbd9 2920 rport->lld_data = target;
9dd69a60 2921 target->rport = rport;
dc1bdbd9 2922
b3589fd4 2923 spin_lock(&host->target_lock);
aef9ec39 2924 list_add_tail(&target->list, &host->target_list);
b3589fd4 2925 spin_unlock(&host->target_lock);
aef9ec39 2926
aef9ec39 2927 scsi_scan_target(&target->scsi_host->shost_gendev,
1d645088 2928 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
aef9ec39 2929
c014c8cd
BVA
2930 if (srp_connected_ch(target) < target->ch_count ||
2931 target->qp_in_error) {
34aa654e
BVA
2932 shost_printk(KERN_INFO, target->scsi_host,
2933 PFX "SCSI scan failed - removing SCSI host\n");
2934 srp_queue_remove_work(target);
2935 goto out;
2936 }
2937
cf1acab7 2938 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
34aa654e
BVA
2939 dev_name(&target->scsi_host->shost_gendev),
2940 srp_sdev_count(target->scsi_host));
2941
2942 spin_lock_irq(&target->lock);
2943 if (target->state == SRP_TARGET_SCANNING)
2944 target->state = SRP_TARGET_LIVE;
2945 spin_unlock_irq(&target->lock);
2946
2947out:
aef9ec39
RD
2948 return 0;
2949}
2950
ee959b00 2951static void srp_release_dev(struct device *dev)
aef9ec39
RD
2952{
2953 struct srp_host *host =
ee959b00 2954 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2955
2956 complete(&host->released);
2957}
2958
2959static struct class srp_class = {
2960 .name = "infiniband_srp",
ee959b00 2961 .dev_release = srp_release_dev
aef9ec39
RD
2962};
2963
96fc248a
BVA
2964/**
2965 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2966 * @host: SRP host.
2967 * @target: SRP target port.
96fc248a
BVA
2968 */
2969static bool srp_conn_unique(struct srp_host *host,
2970 struct srp_target_port *target)
2971{
2972 struct srp_target_port *t;
2973 bool ret = false;
2974
2975 if (target->state == SRP_TARGET_REMOVED)
2976 goto out;
2977
2978 ret = true;
2979
2980 spin_lock(&host->target_lock);
2981 list_for_each_entry(t, &host->target_list, list) {
2982 if (t != target &&
2983 target->id_ext == t->id_ext &&
2984 target->ioc_guid == t->ioc_guid &&
2985 target->initiator_ext == t->initiator_ext) {
2986 ret = false;
2987 break;
2988 }
2989 }
2990 spin_unlock(&host->target_lock);
2991
2992out:
2993 return ret;
2994}
2995
aef9ec39
RD
2996/*
2997 * Target ports are added by writing
2998 *
2999 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3000 * pkey=<P_Key>,service_id=<service ID>
3001 *
3002 * to the add_target sysfs attribute.
3003 */
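/*
 * Hypothetical usage example (all identifier values below are made-up
 * placeholders):
 *
 *	echo "id_ext=200100e08b000000,ioc_guid=0002c90300006600,\
 *	dgid=fe800000000000000002c90300006601,pkey=ffff,\
 *	service_id=0002c90300006600" > add_target
 *
 * where add_target is the per-host sysfs attribute whose store method is
 * srp_create_target() below.
 */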
3004enum {
3005 SRP_OPT_ERR = 0,
3006 SRP_OPT_ID_EXT = 1 << 0,
3007 SRP_OPT_IOC_GUID = 1 << 1,
3008 SRP_OPT_DGID = 1 << 2,
3009 SRP_OPT_PKEY = 1 << 3,
3010 SRP_OPT_SERVICE_ID = 1 << 4,
3011 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 3012 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 3013 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 3014 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 3015 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
3016 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3017 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 3018 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 3019 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 3020 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
3021 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3022 SRP_OPT_IOC_GUID |
3023 SRP_OPT_DGID |
3024 SRP_OPT_PKEY |
3025 SRP_OPT_SERVICE_ID),
3026};
3027
a447c093 3028static const match_table_t srp_opt_tokens = {
52fb2b50
VP
3029 { SRP_OPT_ID_EXT, "id_ext=%s" },
3030 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3031 { SRP_OPT_DGID, "dgid=%s" },
3032 { SRP_OPT_PKEY, "pkey=%x" },
3033 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3034 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3035 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 3036 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 3037 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 3038 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
3039 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3040 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 3041 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 3042 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 3043 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 3044 { SRP_OPT_ERR, NULL }
aef9ec39
RD
3045};
3046
3047static int srp_parse_options(const char *buf, struct srp_target_port *target)
3048{
3049 char *options, *sep_opt;
3050 char *p;
3051 char dgid[3];
3052 substring_t args[MAX_OPT_ARGS];
3053 int opt_mask = 0;
3054 int token;
3055 int ret = -EINVAL;
3056 int i;
3057
3058 options = kstrdup(buf, GFP_KERNEL);
3059 if (!options)
3060 return -ENOMEM;
3061
3062 sep_opt = options;
7dcf9c19 3063 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
3064 if (!*p)
3065 continue;
3066
3067 token = match_token(p, srp_opt_tokens, args);
3068 opt_mask |= token;
3069
3070 switch (token) {
3071 case SRP_OPT_ID_EXT:
3072 p = match_strdup(args);
a20f3a6d
IR
3073 if (!p) {
3074 ret = -ENOMEM;
3075 goto out;
3076 }
aef9ec39
RD
3077 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3078 kfree(p);
3079 break;
3080
3081 case SRP_OPT_IOC_GUID:
3082 p = match_strdup(args);
a20f3a6d
IR
3083 if (!p) {
3084 ret = -ENOMEM;
3085 goto out;
3086 }
aef9ec39
RD
3087 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3088 kfree(p);
3089 break;
3090
3091 case SRP_OPT_DGID:
3092 p = match_strdup(args);
a20f3a6d
IR
3093 if (!p) {
3094 ret = -ENOMEM;
3095 goto out;
3096 }
aef9ec39 3097 if (strlen(p) != 32) {
e0bda7d8 3098 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3099 kfree(p);
aef9ec39
RD
3100 goto out;
3101 }
3102
3103 for (i = 0; i < 16; ++i) {
747fe000
BVA
3104 strlcpy(dgid, p + i * 2, sizeof(dgid));
3105 if (sscanf(dgid, "%hhx",
3106 &target->orig_dgid.raw[i]) < 1) {
3107 ret = -EINVAL;
3108 kfree(p);
3109 goto out;
3110 }
aef9ec39 3111 }
bf17c1c7 3112 kfree(p);
aef9ec39
RD
3113 break;
3114
3115 case SRP_OPT_PKEY:
3116 if (match_hex(args, &token)) {
e0bda7d8 3117 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3118 goto out;
3119 }
747fe000 3120 target->pkey = cpu_to_be16(token);
aef9ec39
RD
3121 break;
3122
3123 case SRP_OPT_SERVICE_ID:
3124 p = match_strdup(args);
a20f3a6d
IR
3125 if (!p) {
3126 ret = -ENOMEM;
3127 goto out;
3128 }
aef9ec39
RD
3129 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3130 kfree(p);
3131 break;
3132
3133 case SRP_OPT_MAX_SECT:
3134 if (match_int(args, &token)) {
e0bda7d8 3135 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3136 goto out;
3137 }
3138 target->scsi_host->max_sectors = token;
3139 break;
3140
4d73f95f
BVA
3141 case SRP_OPT_QUEUE_SIZE:
3142 if (match_int(args, &token) || token < 1) {
3143 pr_warn("bad queue_size parameter '%s'\n", p);
3144 goto out;
3145 }
3146 target->scsi_host->can_queue = token;
3147 target->queue_size = token + SRP_RSP_SQ_SIZE +
3148 SRP_TSK_MGMT_SQ_SIZE;
3149 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3150 target->scsi_host->cmd_per_lun = token;
3151 break;
3152
52fb2b50 3153 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3154 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3155 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3156 p);
52fb2b50
VP
3157 goto out;
3158 }
4d73f95f 3159 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3160 break;
3161
0c0450db
R
3162 case SRP_OPT_IO_CLASS:
3163 if (match_hex(args, &token)) {
e0bda7d8 3164 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3165 goto out;
3166 }
3167 if (token != SRP_REV10_IB_IO_CLASS &&
3168 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3169 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3170 token, SRP_REV10_IB_IO_CLASS,
3171 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3172 goto out;
3173 }
3174 target->io_class = token;
3175 break;
3176
01cb9bcb
IR
3177 case SRP_OPT_INITIATOR_EXT:
3178 p = match_strdup(args);
a20f3a6d
IR
3179 if (!p) {
3180 ret = -ENOMEM;
3181 goto out;
3182 }
01cb9bcb
IR
3183 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3184 kfree(p);
3185 break;
3186
49248644
DD
3187 case SRP_OPT_CMD_SG_ENTRIES:
3188 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3189 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3190 p);
49248644
DD
3191 goto out;
3192 }
3193 target->cmd_sg_cnt = token;
3194 break;
3195
c07d424d
DD
3196 case SRP_OPT_ALLOW_EXT_SG:
3197 if (match_int(args, &token)) {
e0bda7d8 3198 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3199 goto out;
3200 }
3201 target->allow_ext_sg = !!token;
3202 break;
3203
3204 case SRP_OPT_SG_TABLESIZE:
3205 if (match_int(args, &token) || token < 1 ||
65e8617f 3206 token > SG_MAX_SEGMENTS) {
e0bda7d8
BVA
3207 pr_warn("bad max sg_tablesize parameter '%s'\n",
3208 p);
c07d424d
DD
3209 goto out;
3210 }
3211 target->sg_tablesize = token;
3212 break;
3213
4b5e5f41
BVA
3214 case SRP_OPT_COMP_VECTOR:
3215 if (match_int(args, &token) || token < 0) {
3216 pr_warn("bad comp_vector parameter '%s'\n", p);
3217 goto out;
3218 }
3219 target->comp_vector = token;
3220 break;
3221
7bb312e4
VP
3222 case SRP_OPT_TL_RETRY_COUNT:
3223 if (match_int(args, &token) || token < 2 || token > 7) {
3224 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3225 p);
3226 goto out;
3227 }
3228 target->tl_retry_count = token;
3229 break;
3230
aef9ec39 3231 default:
e0bda7d8
BVA
3232 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3233 p);
aef9ec39
RD
3234 goto out;
3235 }
3236 }
3237
3238 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3239 ret = 0;
3240 else
3241 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3242 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3243 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3244 pr_warn("target creation request is missing parameter '%s'\n",
3245 srp_opt_tokens[i].pattern);
aef9ec39 3246
4d73f95f
BVA
3247 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3248 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3249 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3250 target->scsi_host->cmd_per_lun,
3251 target->scsi_host->can_queue);
3252
aef9ec39
RD
3253out:
3254 kfree(options);
3255 return ret;
3256}
3257
ee959b00
TJ
3258static ssize_t srp_create_target(struct device *dev,
3259 struct device_attribute *attr,
aef9ec39
RD
3260 const char *buf, size_t count)
3261{
3262 struct srp_host *host =
ee959b00 3263 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3264 struct Scsi_Host *target_host;
3265 struct srp_target_port *target;
509c07bc 3266 struct srp_rdma_ch *ch;
d1b4289e
BVA
3267 struct srp_device *srp_dev = host->srp_dev;
3268 struct ib_device *ibdev = srp_dev->dev;
d92c0da7 3269 int ret, node_idx, node, cpu, i;
509c5f33 3270 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
d92c0da7 3271 bool multich = false;
aef9ec39
RD
3272
3273 target_host = scsi_host_alloc(&srp_template,
3274 sizeof (struct srp_target_port));
3275 if (!target_host)
3276 return -ENOMEM;
3277
49248644 3278 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3279 target_host->max_channel = 0;
3280 target_host->max_id = 1;
985aa495 3281 target_host->max_lun = -1LL;
3c8edf0e 3282 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3283
aef9ec39 3284 target = host_to_target(target_host);
aef9ec39 3285
49248644
DD
3286 target->io_class = SRP_REV16A_IB_IO_CLASS;
3287 target->scsi_host = target_host;
3288 target->srp_host = host;
5f071777 3289 target->pd = host->srp_dev->pd;
e6bf5f48 3290 target->lkey = host->srp_dev->pd->local_dma_lkey;
49248644 3291 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3292 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3293 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3294 target->tl_retry_count = 7;
4d73f95f 3295 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3296
34aa654e
BVA
3297 /*
 3298 * Prevent the SCSI host from being removed by srp_remove_target()
 3299 * before this function returns.
3300 */
3301 scsi_host_get(target->scsi_host);
3302
4fa354c9
BVA
3303 ret = mutex_lock_interruptible(&host->add_target_mutex);
3304 if (ret < 0)
3305 goto put;
2d7091bc 3306
aef9ec39
RD
3307 ret = srp_parse_options(buf, target);
3308 if (ret)
fb49c8bb 3309 goto out;
aef9ec39 3310
4d73f95f
BVA
3311 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3312
96fc248a
BVA
3313 if (!srp_conn_unique(target->srp_host, target)) {
3314 shost_printk(KERN_INFO, target->scsi_host,
3315 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3316 be64_to_cpu(target->id_ext),
3317 be64_to_cpu(target->ioc_guid),
3318 be64_to_cpu(target->initiator_ext));
3319 ret = -EEXIST;
fb49c8bb 3320 goto out;
96fc248a
BVA
3321 }
3322
5cfb1782 3323 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3324 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3325 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3326 target->sg_tablesize = target->cmd_sg_cnt;
3327 }
3328
509c5f33
BVA
3329 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3330 /*
3331 * FR and FMR can only map one HCA page per entry. If the
3332 * start address is not aligned on a HCA page boundary two
3333 * entries will be used for the head and the tail although
3334 * these two entries combined contain at most one HCA page of
3335 * data. Hence the "+ 1" in the calculation below.
3336 *
3337 * The indirect data buffer descriptor is contiguous so the
3338 * memory for that buffer will only be registered if
3339 * register_always is true. Hence add one to mr_per_cmd if
3340 * register_always has been set.
3341 */
3342 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3343 (ilog2(srp_dev->mr_page_size) - 9);
3344 mr_per_cmd = register_always +
3345 (target->scsi_host->max_sectors + 1 +
3346 max_sectors_per_mr - 1) / max_sectors_per_mr;
3347 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3348 target->scsi_host->max_sectors,
3349 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3350 max_sectors_per_mr, mr_per_cmd);
3351 }
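A minimal stand-alone sketch of the calculation above (user-space, with hypothetical values chosen for illustration rather than taken from the driver):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int mr_page_shift = 12;	/* assumed: 4 KiB MR pages */
		unsigned int max_pages_per_mr = 256;	/* assumed HCA limit */
		unsigned int max_sectors = 1024;	/* assumed SCSI host limit */
		bool register_always = true;		/* module default, see above */

		unsigned int max_sectors_per_mr =
			max_pages_per_mr << (mr_page_shift - 9);
		unsigned int mr_per_cmd = register_always +
			(max_sectors + 1 + max_sectors_per_mr - 1) /
			max_sectors_per_mr;

		/* prints: max_sectors_per_mr = 2048, mr_per_cmd = 2 */
		printf("max_sectors_per_mr = %u, mr_per_cmd = %u\n",
		       max_sectors_per_mr, mr_per_cmd);
		return 0;
	}

With these numbers one memory region covers 2048 sectors, so a 1024-sector command needs a single MR for its data plus one more for the indirect descriptor buffer because register_always defaults to true, giving mr_per_cmd = 2.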
3352
c07d424d 3353 target_host->sg_tablesize = target->sg_tablesize;
509c5f33
BVA
3354 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3355 target->mr_per_cmd = mr_per_cmd;
c07d424d
DD
3356 target->indirect_size = target->sg_tablesize *
3357 sizeof (struct srp_direct_buf);
49248644
DD
3358 target->max_iu_len = sizeof (struct srp_cmd) +
3359 sizeof (struct srp_indirect_buf) +
3360 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3361
c1120f89 3362 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3363 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3364 spin_lock_init(&target->lock);
55ee3ab2 3365 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3366 if (ret)
fb49c8bb 3367 goto out;
aef9ec39 3368
d92c0da7
BVA
3369 ret = -ENOMEM;
3370 target->ch_count = max_t(unsigned, num_online_nodes(),
3371 min(ch_count ? :
3372 min(4 * num_online_nodes(),
3373 ibdev->num_comp_vectors),
3374 num_online_cpus()));
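	/*
	 * Worked example (hypothetical topology, purely illustrative):
	 * with 2 online NUMA nodes, 16 online CPUs, an HCA exposing 8
	 * completion vectors and no ch_count= login option, this yields
	 * max(2, min(min(4 * 2, 8), 16)) = 8 channels.
	 */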
3375 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3376 GFP_KERNEL);
3377 if (!target->ch)
fb49c8bb 3378 goto out;
aef9ec39 3379
d92c0da7
BVA
3380 node_idx = 0;
3381 for_each_online_node(node) {
3382 const int ch_start = (node_idx * target->ch_count /
3383 num_online_nodes());
3384 const int ch_end = ((node_idx + 1) * target->ch_count /
3385 num_online_nodes());
3386 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3387 num_online_nodes() + target->comp_vector)
3388 % ibdev->num_comp_vectors;
3389 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3390 num_online_nodes() + target->comp_vector)
3391 % ibdev->num_comp_vectors;
3392 int cpu_idx = 0;
3393
3394 for_each_online_cpu(cpu) {
3395 if (cpu_to_node(cpu) != node)
3396 continue;
3397 if (ch_start + cpu_idx >= ch_end)
3398 continue;
3399 ch = &target->ch[ch_start + cpu_idx];
3400 ch->target = target;
3401 ch->comp_vector = cv_start == cv_end ? cv_start :
3402 cv_start + cpu_idx % (cv_end - cv_start);
3403 spin_lock_init(&ch->lock);
3404 INIT_LIST_HEAD(&ch->free_tx);
3405 ret = srp_new_cm_id(ch);
3406 if (ret)
3407 goto err_disconnect;
aef9ec39 3408
d92c0da7
BVA
3409 ret = srp_create_ch_ib(ch);
3410 if (ret)
3411 goto err_disconnect;
3412
3413 ret = srp_alloc_req_data(ch);
3414 if (ret)
3415 goto err_disconnect;
3416
3417 ret = srp_connect_ch(ch, multich);
3418 if (ret) {
3419 shost_printk(KERN_ERR, target->scsi_host,
3420 PFX "Connection %d/%d failed\n",
3421 ch_start + cpu_idx,
3422 target->ch_count);
3423 if (node_idx == 0 && cpu_idx == 0) {
3424 goto err_disconnect;
3425 } else {
3426 srp_free_ch_ib(target, ch);
3427 srp_free_req_data(target, ch);
3428 target->ch_count = ch - target->ch;
c257ea6f 3429 goto connected;
d92c0da7
BVA
3430 }
3431 }
3432
3433 multich = true;
3434 cpu_idx++;
3435 }
3436 node_idx++;
aef9ec39
RD
3437 }
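	/*
	 * Continuing the hypothetical 2-node / 16-CPU / 8-vector example
	 * above (and assuming comp_vector was not specified): the loop
	 * creates channels 0-3 from CPUs on node 0 bound to completion
	 * vectors 0-3, and channels 4-7 from CPUs on node 1 bound to
	 * completion vectors 4-7, spreading both the RDMA channels and
	 * their interrupt work evenly across the nodes.
	 */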
3438
c257ea6f 3439connected:
d92c0da7
BVA
3440 target->scsi_host->nr_hw_queues = target->ch_count;
3441
aef9ec39
RD
3442 ret = srp_add_target(host, target);
3443 if (ret)
3444 goto err_disconnect;
3445
34aa654e
BVA
3446 if (target->state != SRP_TARGET_REMOVED) {
3447 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3448 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3449 be64_to_cpu(target->id_ext),
3450 be64_to_cpu(target->ioc_guid),
747fe000 3451 be16_to_cpu(target->pkey),
34aa654e 3452 be64_to_cpu(target->service_id),
747fe000 3453 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3454 }
e7ffde01 3455
2d7091bc
BVA
3456 ret = count;
3457
3458out:
3459 mutex_unlock(&host->add_target_mutex);
34aa654e 3460
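	/*
	 * Drop the reference taken by scsi_host_get() above. On failure,
	 * also drop the initial reference from scsi_host_alloc(): the
	 * target was never added, so srp_remove_target() will never run
	 * and nothing else would free the SCSI host.
	 */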
4fa354c9 3461put:
34aa654e 3462 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3463 if (ret < 0)
3464 scsi_host_put(target->scsi_host);
34aa654e 3465
2d7091bc 3466 return ret;
aef9ec39
RD
3467
3468err_disconnect:
3469 srp_disconnect_target(target);
3470
d92c0da7
BVA
3471 for (i = 0; i < target->ch_count; i++) {
3472 ch = &target->ch[i];
3473 srp_free_ch_ib(target, ch);
3474 srp_free_req_data(target, ch);
3475 }
aef9ec39 3476
d92c0da7 3477 kfree(target->ch);
2d7091bc 3478 goto out;
aef9ec39
RD
3479}
3480
ee959b00 3481static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3482
ee959b00
TJ
3483static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3484 char *buf)
aef9ec39 3485{
ee959b00 3486 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3487
05321937 3488 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3489}
3490
ee959b00 3491static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3492
ee959b00
TJ
3493static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3494 char *buf)
aef9ec39 3495{
ee959b00 3496 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3497
3498 return sprintf(buf, "%d\n", host->port);
3499}
3500
ee959b00 3501static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3502
f5358a17 3503static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3504{
3505 struct srp_host *host;
3506
3507 host = kzalloc(sizeof *host, GFP_KERNEL);
3508 if (!host)
3509 return NULL;
3510
3511 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3512 spin_lock_init(&host->target_lock);
aef9ec39 3513 init_completion(&host->released);
2d7091bc 3514 mutex_init(&host->add_target_mutex);
05321937 3515 host->srp_dev = device;
aef9ec39
RD
3516 host->port = port;
3517
ee959b00
TJ
3518 host->dev.class = &srp_class;
3519 host->dev.parent = device->dev->dma_device;
d927e38c 3520 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3521
ee959b00 3522 if (device_register(&host->dev))
f5358a17 3523 goto free_host;
ee959b00 3524 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3525 goto err_class;
ee959b00 3526 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3527 goto err_class;
ee959b00 3528 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3529 goto err_class;
3530
3531 return host;
3532
3533err_class:
ee959b00 3534 device_unregister(&host->dev);
aef9ec39 3535
f5358a17 3536free_host:
aef9ec39
RD
3537 kfree(host);
3538
3539 return NULL;
3540}
3541
3542static void srp_add_one(struct ib_device *device)
3543{
f5358a17 3544 struct srp_device *srp_dev;
042dd765 3545 struct ib_device_attr *attr = &device->attrs;
aef9ec39 3546 struct srp_host *host;
4139032b 3547 int mr_page_shift, p;
52ede08f 3548 u64 max_pages_per_mr;
5f071777 3549 unsigned int flags = 0;
aef9ec39 3550
249f0656 3551 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
f5358a17 3552 if (!srp_dev)
4a061b28 3553 return;
f5358a17
RD
3554
3555 /*
3556 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3557 * minimum of 4096 bytes. We're unlikely to build large sglists
3558 * out of smaller entries.
f5358a17 3559 */
042dd765 3560 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
52ede08f
BVA
3561 srp_dev->mr_page_size = 1 << mr_page_shift;
3562 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
042dd765 3563 max_pages_per_mr = attr->max_mr_size;
52ede08f 3564 do_div(max_pages_per_mr, srp_dev->mr_page_size);
509c5f33 3565 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
042dd765 3566 attr->max_mr_size, srp_dev->mr_page_size,
509c5f33 3567 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
52ede08f
BVA
3568 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3569 max_pages_per_mr);
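	/*
	 * Worked example (hypothetical HCA capabilities): page_size_cap =
	 * 0xfffff000, i.e. every page size from 4 KiB upwards, gives
	 * mr_page_shift = max(12, ffs(0xfffff000) - 1) = 12 and thus a
	 * 4 KiB MR page size; a max_mr_size of 4 GiB then gives
	 * max_pages_per_mr = 1048576 before the cap to SRP_MAX_PAGES_PER_MR.
	 */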
835ee624
BVA
3570
3571 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3572 device->map_phys_fmr && device->unmap_fmr);
042dd765 3573 srp_dev->has_fr = (attr->device_cap_flags &
835ee624 3574 IB_DEVICE_MEM_MGT_EXTENSIONS);
c222a39f 3575 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
835ee624 3576 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
c222a39f 3577 } else if (!never_register &&
042dd765 3578 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
509c5f33
BVA
3579 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3580 (!srp_dev->has_fmr || prefer_fr));
3581 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3582 }
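	/*
	 * With the default prefer_fr = true this means: a device with both
	 * FR and FMR ends up with use_fast_reg set, an FMR-only device with
	 * use_fmr set, an FR-only device with use_fast_reg set, and with
	 * never_register set (or neither scheme supported) both stay false.
	 */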
835ee624 3583
5f071777
CH
3584 if (never_register || !register_always ||
3585 (!srp_dev->has_fmr && !srp_dev->has_fr))
3586 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3587
5cfb1782
BVA
3588 if (srp_dev->use_fast_reg) {
3589 srp_dev->max_pages_per_mr =
3590 min_t(u32, srp_dev->max_pages_per_mr,
042dd765 3591 attr->max_fast_reg_page_list_len);
5cfb1782 3592 }
52ede08f
BVA
3593 srp_dev->mr_max_size = srp_dev->mr_page_size *
3594 srp_dev->max_pages_per_mr;
4a061b28 3595 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
042dd765
BVA
3596 device->name, mr_page_shift, attr->max_mr_size,
3597 attr->max_fast_reg_page_list_len,
52ede08f 3598 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3599
3600 INIT_LIST_HEAD(&srp_dev->dev_list);
3601
3602 srp_dev->dev = device;
5f071777 3603 srp_dev->pd = ib_alloc_pd(device, flags);
f5358a17
RD
3604 if (IS_ERR(srp_dev->pd))
3605 goto free_dev;
3606
f5358a17 3607
4139032b 3608 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3609 host = srp_add_port(srp_dev, p);
aef9ec39 3610 if (host)
f5358a17 3611 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3612 }
3613
f5358a17 3614 ib_set_client_data(device, &srp_client, srp_dev);
4a061b28 3615 return;
f5358a17 3616
f5358a17
RD
3617free_dev:
3618 kfree(srp_dev);
aef9ec39
RD
3619}
3620
7c1eb45a 3621static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3622{
f5358a17 3623 struct srp_device *srp_dev;
aef9ec39 3624 struct srp_host *host, *tmp_host;
ef6c49d8 3625 struct srp_target_port *target;
aef9ec39 3626
7c1eb45a 3627 srp_dev = client_data;
1fe0cb84
DB
3628 if (!srp_dev)
3629 return;
aef9ec39 3630
f5358a17 3631 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3632 device_unregister(&host->dev);
aef9ec39
RD
3633 /*
3634 * Wait for the sysfs entry to go away, so that no new
3635 * target ports can be created.
3636 */
3637 wait_for_completion(&host->released);
3638
3639 /*
ef6c49d8 3640 * Remove all target ports.
aef9ec39 3641 */
b3589fd4 3642 spin_lock(&host->target_lock);
ef6c49d8
BVA
3643 list_for_each_entry(target, &host->target_list, list)
3644 srp_queue_remove_work(target);
b3589fd4 3645 spin_unlock(&host->target_lock);
aef9ec39
RD
3646
3647 /*
bcc05910 3648 * Wait for tl_err and target port removal tasks.
aef9ec39 3649 */
ef6c49d8 3650 flush_workqueue(system_long_wq);
bcc05910 3651 flush_workqueue(srp_remove_wq);
aef9ec39 3652
aef9ec39
RD
3653 kfree(host);
3654 }
3655
f5358a17
RD
3656 ib_dealloc_pd(srp_dev->pd);
3657
3658 kfree(srp_dev);
aef9ec39
RD
3659}
3660
3236822b 3661static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3662 .has_rport_state = true,
3663 .reset_timer_if_blocked = true,
a95cadb9 3664 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3665 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3666 .dev_loss_tmo = &srp_dev_loss_tmo,
3667 .reconnect = srp_rport_reconnect,
dc1bdbd9 3668 .rport_delete = srp_rport_delete,
ed9b2264 3669 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3670};
3671
aef9ec39
RD
3672static int __init srp_init_module(void)
3673{
3674 int ret;
3675
49248644 3676 if (srp_sg_tablesize) {
e0bda7d8 3677 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3678 if (!cmd_sg_entries)
3679 cmd_sg_entries = srp_sg_tablesize;
3680 }
3681
3682 if (!cmd_sg_entries)
3683 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3684
3685 if (cmd_sg_entries > 255) {
e0bda7d8 3686 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3687 cmd_sg_entries = 255;
1e89a194
DD
3688 }
3689
c07d424d
DD
3690 if (!indirect_sg_entries)
3691 indirect_sg_entries = cmd_sg_entries;
3692 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3693 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3694 cmd_sg_entries);
c07d424d
DD
3695 indirect_sg_entries = cmd_sg_entries;
3696 }
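	/*
	 * For example, loading the module with cmd_sg_entries=300 and
	 * indirect_sg_entries unset ends up with cmd_sg_entries clamped
	 * to 255 and indirect_sg_entries defaulting to the same 255.
	 */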
3697
bcc05910 3698 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3699 if (!srp_remove_wq) {
3700 ret = -ENOMEM;
bcc05910
BVA
3701 goto out;
3702 }
3703
3704 ret = -ENOMEM;
3236822b
FT
3705 ib_srp_transport_template =
3706 srp_attach_transport(&ib_srp_transport_functions);
3707 if (!ib_srp_transport_template)
bcc05910 3708 goto destroy_wq;
3236822b 3709
aef9ec39
RD
3710 ret = class_register(&srp_class);
3711 if (ret) {
e0bda7d8 3712 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3713 goto release_tr;
aef9ec39
RD
3714 }
3715
c1a0b23b
MT
3716 ib_sa_register_client(&srp_sa_client);
3717
aef9ec39
RD
3718 ret = ib_register_client(&srp_client);
3719 if (ret) {
e0bda7d8 3720 pr_err("couldn't register IB client\n");
bcc05910 3721 goto unreg_sa;
aef9ec39
RD
3722 }
3723
bcc05910
BVA
3724out:
3725 return ret;
3726
3727unreg_sa:
3728 ib_sa_unregister_client(&srp_sa_client);
3729 class_unregister(&srp_class);
3730
3731release_tr:
3732 srp_release_transport(ib_srp_transport_template);
3733
3734destroy_wq:
3735 destroy_workqueue(srp_remove_wq);
3736 goto out;
aef9ec39
RD
3737}
3738
3739static void __exit srp_cleanup_module(void)
3740{
3741 ib_unregister_client(&srp_client);
c1a0b23b 3742 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3743 class_unregister(&srp_class);
3236822b 3744 srp_release_transport(ib_srp_transport_template);
bcc05910 3745 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3746}
3747
3748module_init(srp_init_module);
3749module_exit(srp_cleanup_module);