/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
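
/*
 * Illustrative sketch (not part of the upstream driver): what the parameter
 * plumbing above amounts to.  Writing a value to
 * /sys/module/ib_srp/parameters/reconnect_delay makes the kernel call
 * srp_tmo_set() through srp_tmo_ops with kp->arg pointing at
 * srp_reconnect_delay; the helper below simulates such a write.  The name
 * srp_tmo_set_example() and the value "20" are made up for the example.
 */
static int __maybe_unused srp_tmo_set_example(void)
{
	static const struct kernel_param kp_example = {
		.ops = &srp_tmo_ops,
		.arg = &srp_reconnect_delay,
	};

	/* Returns a negative error code if the three timeouts would clash. */
	return srp_tmo_set("20", &kp_example);
}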

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

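/*
 * Illustrative sketch (not part of the upstream driver): the typical
 * lifecycle of an information unit created with srp_alloc_iu().  The host
 * pointer and the 4096-byte size are assumptions made for this example only.
 */
static void __maybe_unused srp_iu_lifecycle_example(struct srp_host *host)
{
	struct srp_iu *iu;

	iu = srp_alloc_iu(host, 4096, GFP_KERNEL, DMA_TO_DEVICE);
	if (!iu)
		return;		/* allocation or DMA mapping failed */

	/* ... fill iu->buf and post it as a send work request ... */

	/* Unmaps iu->dma and frees both iu->buf and the srp_iu itself. */
	srp_free_iu(host, iu);
}
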
257static void srp_qp_event(struct ib_event *event, void *context)
258{
57363d98
SG
259 pr_debug("QP event %s (%d)\n",
260 ib_event_msg(event->event), event->event);
aef9ec39
RD
261}
262
263static int srp_init_qp(struct srp_target_port *target,
264 struct ib_qp *qp)
265{
266 struct ib_qp_attr *attr;
267 int ret;
268
269 attr = kmalloc(sizeof *attr, GFP_KERNEL);
270 if (!attr)
271 return -ENOMEM;
272
56b5390c
BVA
273 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
274 target->srp_host->port,
275 be16_to_cpu(target->pkey),
276 &attr->pkey_index);
aef9ec39
RD
277 if (ret)
278 goto out;
279
280 attr->qp_state = IB_QPS_INIT;
281 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
282 IB_ACCESS_REMOTE_WRITE);
283 attr->port_num = target->srp_host->port;
284
285 ret = ib_modify_qp(qp, attr,
286 IB_QP_STATE |
287 IB_QP_PKEY_INDEX |
288 IB_QP_ACCESS_FLAGS |
289 IB_QP_PORT);
290
291out:
292 kfree(attr);
293 return ret;
294}
295
509c07bc 296static int srp_new_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 297{
509c07bc 298 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
299 struct ib_cm_id *new_cm_id;
300
05321937 301 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
509c07bc 302 srp_cm_handler, ch);
9fe4bcf4
DD
303 if (IS_ERR(new_cm_id))
304 return PTR_ERR(new_cm_id);
305
509c07bc
BVA
306 if (ch->cm_id)
307 ib_destroy_cm_id(ch->cm_id);
308 ch->cm_id = new_cm_id;
309 ch->path.sgid = target->sgid;
310 ch->path.dgid = target->orig_dgid;
311 ch->path.pkey = target->pkey;
312 ch->path.service_id = target->service_id;
9fe4bcf4
DD
313
314 return 0;
315}
316
d1b4289e
BVA
317static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
318{
319 struct srp_device *dev = target->srp_host->srp_dev;
320 struct ib_fmr_pool_param fmr_param;
321
322 memset(&fmr_param, 0, sizeof(fmr_param));
fa9863f8 323 fmr_param.pool_size = target->mr_pool_size;
d1b4289e
BVA
324 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
325 fmr_param.cache = 1;
52ede08f
BVA
326 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
327 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
328 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
329 IB_ACCESS_REMOTE_WRITE |
330 IB_ACCESS_REMOTE_READ);
331
332 return ib_create_fmr_pool(dev->pd, &fmr_param);
333}
334
5cfb1782
BVA
335/**
336 * srp_destroy_fr_pool() - free the resources owned by a pool
337 * @pool: Fast registration pool to be destroyed.
338 */
339static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
340{
341 int i;
342 struct srp_fr_desc *d;
343
344 if (!pool)
345 return;
346
347 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
5cfb1782
BVA
348 if (d->mr)
349 ib_dereg_mr(d->mr);
350 }
351 kfree(pool);
352}
353
354/**
355 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
356 * @device: IB device to allocate fast registration descriptors for.
357 * @pd: Protection domain associated with the FR descriptors.
358 * @pool_size: Number of descriptors to allocate.
359 * @max_page_list_len: Maximum fast registration work request page list length.
360 */
361static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
362 struct ib_pd *pd, int pool_size,
363 int max_page_list_len)
364{
365 struct srp_fr_pool *pool;
366 struct srp_fr_desc *d;
367 struct ib_mr *mr;
5cfb1782
BVA
368 int i, ret = -EINVAL;
369
370 if (pool_size <= 0)
371 goto err;
372 ret = -ENOMEM;
373 pool = kzalloc(sizeof(struct srp_fr_pool) +
374 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
375 if (!pool)
376 goto err;
377 pool->size = pool_size;
378 pool->max_page_list_len = max_page_list_len;
379 spin_lock_init(&pool->lock);
380 INIT_LIST_HEAD(&pool->free_list);
381
382 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
563b67c5
SG
383 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
384 max_page_list_len);
5cfb1782
BVA
385 if (IS_ERR(mr)) {
386 ret = PTR_ERR(mr);
387 goto destroy_pool;
388 }
389 d->mr = mr;
5cfb1782
BVA
390 list_add_tail(&d->entry, &pool->free_list);
391 }
392
393out:
394 return pool;
395
396destroy_pool:
397 srp_destroy_fr_pool(pool);
398
399err:
400 pool = ERR_PTR(ret);
401 goto out;
402}
403
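/*
 * Illustrative sketch (not part of the upstream driver): creating and
 * destroying a fast registration pool.  The pool size (256) and the page
 * list length (32) are example values, not defaults used by this driver.
 */
static void __maybe_unused srp_fr_pool_create_example(struct ib_device *ibdev,
						      struct ib_pd *pd)
{
	struct srp_fr_pool *pool;

	pool = srp_create_fr_pool(ibdev, pd, 256, 32);
	if (IS_ERR(pool))
		return;		/* one of the ib_alloc_mr() calls failed */

	/* ... hand descriptors out with srp_fr_pool_get() ... */

	srp_destroy_fr_pool(pool);
}
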
404/**
405 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
406 * @pool: Pool to obtain descriptor from.
407 */
408static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
409{
410 struct srp_fr_desc *d = NULL;
411 unsigned long flags;
412
413 spin_lock_irqsave(&pool->lock, flags);
414 if (!list_empty(&pool->free_list)) {
415 d = list_first_entry(&pool->free_list, typeof(*d), entry);
416 list_del(&d->entry);
417 }
418 spin_unlock_irqrestore(&pool->lock, flags);
419
420 return d;
421}
422
423/**
424 * srp_fr_pool_put() - put an FR descriptor back in the free list
425 * @pool: Pool the descriptor was allocated from.
426 * @desc: Pointer to an array of fast registration descriptor pointers.
427 * @n: Number of descriptors to put back.
428 *
429 * Note: The caller must already have queued an invalidation request for
430 * desc->mr->rkey before calling this function.
431 */
432static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
433 int n)
434{
435 unsigned long flags;
436 int i;
437
438 spin_lock_irqsave(&pool->lock, flags);
439 for (i = 0; i < n; i++)
440 list_add(&desc[i]->entry, &pool->free_list);
441 spin_unlock_irqrestore(&pool->lock, flags);
442}
443
444static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
445{
446 struct srp_device *dev = target->srp_host->srp_dev;
447
fa9863f8 448 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
5cfb1782
BVA
449 dev->max_pages_per_mr);
450}
451
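/*
 * Illustrative sketch (not part of the upstream driver): checking a fast
 * registration descriptor out of the pool and returning it.  In the real
 * data path the descriptor's rkey is invalidated with an IB_WR_LOCAL_INV
 * work request before srp_fr_pool_put() is called (see srp_unmap_data()).
 */
static void __maybe_unused srp_fr_pool_use_example(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *desc;

	desc = srp_fr_pool_get(pool);
	if (!desc)
		return;		/* pool exhausted; the caller must back off */

	/* ... post an IB_WR_REG_MR work request that uses desc->mr ... */
	/* ... later, queue an invalidation of desc->mr->rkey ... */

	srp_fr_pool_put(pool, &desc, 1);
}
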
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it.  This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}
465
509c07bc 466static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 467{
509c07bc 468 struct srp_target_port *target = ch->target;
62154b2e 469 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 470 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
471 struct ib_cq *recv_cq, *send_cq;
472 struct ib_qp *qp;
d1b4289e 473 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782 474 struct srp_fr_pool *fr_pool = NULL;
509c5f33 475 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
aef9ec39
RD
476 int ret;
477
478 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
479 if (!init_attr)
480 return -ENOMEM;
481
561392d4 482 /* queue_size + 1 for ib_drain_rq() */
1dc7b1f1
CH
483 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
484 ch->comp_vector, IB_POLL_SOFTIRQ);
73aa89ed
IR
485 if (IS_ERR(recv_cq)) {
486 ret = PTR_ERR(recv_cq);
da9d2f07 487 goto err;
aef9ec39
RD
488 }
489
1dc7b1f1
CH
490 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
491 ch->comp_vector, IB_POLL_DIRECT);
73aa89ed
IR
492 if (IS_ERR(send_cq)) {
493 ret = PTR_ERR(send_cq);
da9d2f07 494 goto err_recv_cq;
9c03dc9f
BVA
495 }
496
aef9ec39 497 init_attr->event_handler = srp_qp_event;
5cfb1782 498 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 499 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
500 init_attr->cap.max_recv_sge = 1;
501 init_attr->cap.max_send_sge = 1;
5cfb1782 502 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 503 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
504 init_attr->send_cq = send_cq;
505 init_attr->recv_cq = recv_cq;
aef9ec39 506
62154b2e 507 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
508 if (IS_ERR(qp)) {
509 ret = PTR_ERR(qp);
da9d2f07 510 goto err_send_cq;
aef9ec39
RD
511 }
512
73aa89ed 513 ret = srp_init_qp(target, qp);
da9d2f07
RD
514 if (ret)
515 goto err_qp;
aef9ec39 516
002f1567 517 if (dev->use_fast_reg) {
5cfb1782
BVA
518 fr_pool = srp_alloc_fr_pool(target);
519 if (IS_ERR(fr_pool)) {
520 ret = PTR_ERR(fr_pool);
521 shost_printk(KERN_WARNING, target->scsi_host, PFX
522 "FR pool allocation failed (%d)\n", ret);
523 goto err_qp;
524 }
002f1567 525 } else if (dev->use_fmr) {
d1b4289e
BVA
526 fmr_pool = srp_alloc_fmr_pool(target);
527 if (IS_ERR(fmr_pool)) {
528 ret = PTR_ERR(fmr_pool);
529 shost_printk(KERN_WARNING, target->scsi_host, PFX
530 "FMR pool allocation failed (%d)\n", ret);
531 goto err_qp;
532 }
d1b4289e
BVA
533 }
534
509c07bc 535 if (ch->qp)
f83b2561 536 srp_destroy_qp(ch->qp);
509c07bc 537 if (ch->recv_cq)
1dc7b1f1 538 ib_free_cq(ch->recv_cq);
509c07bc 539 if (ch->send_cq)
1dc7b1f1 540 ib_free_cq(ch->send_cq);
73aa89ed 541
509c07bc
BVA
542 ch->qp = qp;
543 ch->recv_cq = recv_cq;
544 ch->send_cq = send_cq;
73aa89ed 545
7fbc67df
SG
546 if (dev->use_fast_reg) {
547 if (ch->fr_pool)
548 srp_destroy_fr_pool(ch->fr_pool);
549 ch->fr_pool = fr_pool;
550 } else if (dev->use_fmr) {
551 if (ch->fmr_pool)
552 ib_destroy_fmr_pool(ch->fmr_pool);
553 ch->fmr_pool = fmr_pool;
554 }
555
da9d2f07
RD
556 kfree(init_attr);
557 return 0;
558
559err_qp:
f83b2561 560 srp_destroy_qp(qp);
da9d2f07
RD
561
562err_send_cq:
1dc7b1f1 563 ib_free_cq(send_cq);
da9d2f07
RD
564
565err_recv_cq:
1dc7b1f1 566 ib_free_cq(recv_cq);
da9d2f07
RD
567
568err:
aef9ec39
RD
569 kfree(init_attr);
570 return ret;
571}
572
4d73f95f
BVA
573/*
574 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 575 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 576 */
509c07bc
BVA
577static void srp_free_ch_ib(struct srp_target_port *target,
578 struct srp_rdma_ch *ch)
aef9ec39 579{
5cfb1782 580 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
581 int i;
582
d92c0da7
BVA
583 if (!ch->target)
584 return;
585
509c07bc
BVA
586 if (ch->cm_id) {
587 ib_destroy_cm_id(ch->cm_id);
588 ch->cm_id = NULL;
394c595e
BVA
589 }
590
d92c0da7
BVA
591 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
592 if (!ch->qp)
593 return;
594
5cfb1782 595 if (dev->use_fast_reg) {
509c07bc
BVA
596 if (ch->fr_pool)
597 srp_destroy_fr_pool(ch->fr_pool);
002f1567 598 } else if (dev->use_fmr) {
509c07bc
BVA
599 if (ch->fmr_pool)
600 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 601 }
1dc7b1f1 602
f83b2561 603 srp_destroy_qp(ch->qp);
1dc7b1f1
CH
604 ib_free_cq(ch->send_cq);
605 ib_free_cq(ch->recv_cq);
aef9ec39 606
d92c0da7
BVA
607 /*
608 * Avoid that the SCSI error handler tries to use this channel after
609 * it has been freed. The SCSI error handler can namely continue
610 * trying to perform recovery actions after scsi_remove_host()
611 * returned.
612 */
613 ch->target = NULL;
614
509c07bc
BVA
615 ch->qp = NULL;
616 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 617
509c07bc 618 if (ch->rx_ring) {
4d73f95f 619 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
620 srp_free_iu(target->srp_host, ch->rx_ring[i]);
621 kfree(ch->rx_ring);
622 ch->rx_ring = NULL;
4d73f95f 623 }
509c07bc 624 if (ch->tx_ring) {
4d73f95f 625 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
626 srp_free_iu(target->srp_host, ch->tx_ring[i]);
627 kfree(ch->tx_ring);
628 ch->tx_ring = NULL;
4d73f95f 629 }
aef9ec39
RD
630}
631
632static void srp_path_rec_completion(int status,
633 struct ib_sa_path_rec *pathrec,
509c07bc 634 void *ch_ptr)
aef9ec39 635{
509c07bc
BVA
636 struct srp_rdma_ch *ch = ch_ptr;
637 struct srp_target_port *target = ch->target;
aef9ec39 638
509c07bc 639 ch->status = status;
aef9ec39 640 if (status)
7aa54bd7
DD
641 shost_printk(KERN_ERR, target->scsi_host,
642 PFX "Got failed path rec status %d\n", status);
aef9ec39 643 else
509c07bc
BVA
644 ch->path = *pathrec;
645 complete(&ch->done);
aef9ec39
RD
646}
647
509c07bc 648static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 649{
509c07bc 650 struct srp_target_port *target = ch->target;
a702adce
BVA
651 int ret;
652
509c07bc
BVA
653 ch->path.numb_path = 1;
654
655 init_completion(&ch->done);
656
657 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
658 target->srp_host->srp_dev->dev,
659 target->srp_host->port,
660 &ch->path,
661 IB_SA_PATH_REC_SERVICE_ID |
662 IB_SA_PATH_REC_DGID |
663 IB_SA_PATH_REC_SGID |
664 IB_SA_PATH_REC_NUMB_PATH |
665 IB_SA_PATH_REC_PKEY,
666 SRP_PATH_REC_TIMEOUT_MS,
667 GFP_KERNEL,
668 srp_path_rec_completion,
669 ch, &ch->path_query);
670 if (ch->path_query_id < 0)
671 return ch->path_query_id;
672
673 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
674 if (ret < 0)
675 return ret;
aef9ec39 676
509c07bc 677 if (ch->status < 0)
7aa54bd7
DD
678 shost_printk(KERN_WARNING, target->scsi_host,
679 PFX "Path record query failed\n");
aef9ec39 680
509c07bc 681 return ch->status;
aef9ec39
RD
682}
683
d92c0da7 684static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 685{
509c07bc 686 struct srp_target_port *target = ch->target;
aef9ec39
RD
687 struct {
688 struct ib_cm_req_param param;
689 struct srp_login_req priv;
690 } *req = NULL;
691 int status;
692
693 req = kzalloc(sizeof *req, GFP_KERNEL);
694 if (!req)
695 return -ENOMEM;
696
509c07bc 697 req->param.primary_path = &ch->path;
aef9ec39
RD
698 req->param.alternate_path = NULL;
699 req->param.service_id = target->service_id;
509c07bc
BVA
700 req->param.qp_num = ch->qp->qp_num;
701 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
702 req->param.private_data = &req->priv;
703 req->param.private_data_len = sizeof req->priv;
704 req->param.flow_control = 1;
705
706 get_random_bytes(&req->param.starting_psn, 4);
707 req->param.starting_psn &= 0xffffff;
708
709 /*
710 * Pick some arbitrary defaults here; we could make these
711 * module parameters if anyone cared about setting them.
712 */
713 req->param.responder_resources = 4;
714 req->param.remote_cm_response_timeout = 20;
715 req->param.local_cm_response_timeout = 20;
7bb312e4 716 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
717 req->param.rnr_retry_count = 7;
718 req->param.max_cm_retries = 15;
719
720 req->priv.opcode = SRP_LOGIN_REQ;
721 req->priv.tag = 0;
49248644 722 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
723 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
724 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
725 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
726 SRP_MULTICHAN_SINGLE);
0c0450db 727 /*
3cd96564 728 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
729 * port identifier format is 8 bytes of ID extension followed
730 * by 8 bytes of GUID. Older drafts put the two halves in the
731 * opposite order, so that the GUID comes first.
732 *
733 * Targets conforming to these obsolete drafts can be
734 * recognized by the I/O Class they report.
735 */
736 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
737 memcpy(req->priv.initiator_port_id,
747fe000 738 &target->sgid.global.interface_id, 8);
0c0450db 739 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 740 &target->initiator_ext, 8);
0c0450db
R
741 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
742 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
743 } else {
744 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
745 &target->initiator_ext, 8);
746 memcpy(req->priv.initiator_port_id + 8,
747fe000 747 &target->sgid.global.interface_id, 8);
0c0450db
R
748 memcpy(req->priv.target_port_id, &target->id_ext, 8);
749 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
750 }
751
aef9ec39
RD
752 /*
753 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
754 * zero out the first 8 bytes of our initiator port ID and set
755 * the second 8 bytes to the local node GUID.
aef9ec39 756 */
5d7cbfd6 757 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
758 shost_printk(KERN_DEBUG, target->scsi_host,
759 PFX "Topspin/Cisco initiator port ID workaround "
760 "activated for target GUID %016llx\n",
45c37cad 761 be64_to_cpu(target->ioc_guid));
aef9ec39 762 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 763 memcpy(req->priv.initiator_port_id + 8,
05321937 764 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 765 }
aef9ec39 766
509c07bc 767 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
768
769 kfree(req);
770
771 return status;
772}
773
ef6c49d8
BVA
774static bool srp_queue_remove_work(struct srp_target_port *target)
775{
776 bool changed = false;
777
778 spin_lock_irq(&target->lock);
779 if (target->state != SRP_TARGET_REMOVED) {
780 target->state = SRP_TARGET_REMOVED;
781 changed = true;
782 }
783 spin_unlock_irq(&target->lock);
784
785 if (changed)
bcc05910 786 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
787
788 return changed;
789}
790
aef9ec39
RD
791static void srp_disconnect_target(struct srp_target_port *target)
792{
d92c0da7
BVA
793 struct srp_rdma_ch *ch;
794 int i;
509c07bc 795
c014c8cd 796 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 797
c014c8cd
BVA
798 for (i = 0; i < target->ch_count; i++) {
799 ch = &target->ch[i];
800 ch->connected = false;
801 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
802 shost_printk(KERN_DEBUG, target->scsi_host,
803 PFX "Sending CM DREQ failed\n");
294c875a 804 }
e6581056 805 }
aef9ec39
RD
806}
807
509c07bc
BVA
808static void srp_free_req_data(struct srp_target_port *target,
809 struct srp_rdma_ch *ch)
8f26c9ff 810{
5cfb1782
BVA
811 struct srp_device *dev = target->srp_host->srp_dev;
812 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
813 struct srp_request *req;
814 int i;
815
47513cf4 816 if (!ch->req_ring)
4d73f95f
BVA
817 return;
818
819 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 820 req = &ch->req_ring[i];
9a21be53 821 if (dev->use_fast_reg) {
5cfb1782 822 kfree(req->fr_list);
9a21be53 823 } else {
5cfb1782 824 kfree(req->fmr_list);
9a21be53
SG
825 kfree(req->map_page);
826 }
c07d424d
DD
827 if (req->indirect_dma_addr) {
828 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
829 target->indirect_size,
830 DMA_TO_DEVICE);
831 }
832 kfree(req->indirect_desc);
8f26c9ff 833 }
4d73f95f 834
509c07bc
BVA
835 kfree(ch->req_ring);
836 ch->req_ring = NULL;
8f26c9ff
DD
837}
838
509c07bc 839static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 840{
509c07bc 841 struct srp_target_port *target = ch->target;
b81d00bd
BVA
842 struct srp_device *srp_dev = target->srp_host->srp_dev;
843 struct ib_device *ibdev = srp_dev->dev;
844 struct srp_request *req;
5cfb1782 845 void *mr_list;
b81d00bd
BVA
846 dma_addr_t dma_addr;
847 int i, ret = -ENOMEM;
848
509c07bc
BVA
849 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
850 GFP_KERNEL);
851 if (!ch->req_ring)
4d73f95f
BVA
852 goto out;
853
854 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 855 req = &ch->req_ring[i];
509c5f33 856 mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
5cfb1782
BVA
857 GFP_KERNEL);
858 if (!mr_list)
859 goto out;
9a21be53 860 if (srp_dev->use_fast_reg) {
5cfb1782 861 req->fr_list = mr_list;
9a21be53 862 } else {
5cfb1782 863 req->fmr_list = mr_list;
9a21be53
SG
864 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
865 sizeof(void *), GFP_KERNEL);
866 if (!req->map_page)
867 goto out;
868 }
b81d00bd 869 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 870 if (!req->indirect_desc)
b81d00bd
BVA
871 goto out;
872
873 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
874 target->indirect_size,
875 DMA_TO_DEVICE);
876 if (ib_dma_mapping_error(ibdev, dma_addr))
877 goto out;
878
879 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
880 }
881 ret = 0;
882
883out:
884 return ret;
885}
886
683b159a
BVA
887/**
888 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
889 * @shost: SCSI host whose attributes to remove from sysfs.
890 *
891 * Note: Any attributes defined in the host template and that did not exist
892 * before invocation of this function will be ignored.
893 */
894static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
895{
896 struct device_attribute **attr;
897
898 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
899 device_remove_file(&shost->shost_dev, *attr);
900}
901
ee12d6a8
BVA
902static void srp_remove_target(struct srp_target_port *target)
903{
d92c0da7
BVA
904 struct srp_rdma_ch *ch;
905 int i;
509c07bc 906
ef6c49d8
BVA
907 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
908
ee12d6a8 909 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 910 srp_rport_get(target->rport);
ee12d6a8
BVA
911 srp_remove_host(target->scsi_host);
912 scsi_remove_host(target->scsi_host);
93079162 913 srp_stop_rport_timers(target->rport);
ef6c49d8 914 srp_disconnect_target(target);
d92c0da7
BVA
915 for (i = 0; i < target->ch_count; i++) {
916 ch = &target->ch[i];
917 srp_free_ch_ib(target, ch);
918 }
c1120f89 919 cancel_work_sync(&target->tl_err_work);
9dd69a60 920 srp_rport_put(target->rport);
d92c0da7
BVA
921 for (i = 0; i < target->ch_count; i++) {
922 ch = &target->ch[i];
923 srp_free_req_data(target, ch);
924 }
925 kfree(target->ch);
926 target->ch = NULL;
65d7dd2f
VP
927
928 spin_lock(&target->srp_host->target_lock);
929 list_del(&target->list);
930 spin_unlock(&target->srp_host->target_lock);
931
ee12d6a8
BVA
932 scsi_host_put(target->scsi_host);
933}
934
c4028958 935static void srp_remove_work(struct work_struct *work)
aef9ec39 936{
c4028958 937 struct srp_target_port *target =
ef6c49d8 938 container_of(work, struct srp_target_port, remove_work);
aef9ec39 939
ef6c49d8 940 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 941
96fc248a 942 srp_remove_target(target);
aef9ec39
RD
943}
944
dc1bdbd9
BVA
945static void srp_rport_delete(struct srp_rport *rport)
946{
947 struct srp_target_port *target = rport->lld_data;
948
949 srp_queue_remove_work(target);
950}
951
c014c8cd
BVA
952/**
953 * srp_connected_ch() - number of connected channels
954 * @target: SRP target port.
955 */
956static int srp_connected_ch(struct srp_target_port *target)
957{
958 int i, c = 0;
959
960 for (i = 0; i < target->ch_count; i++)
961 c += target->ch[i].connected;
962
963 return c;
964}
965
d92c0da7 966static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 967{
509c07bc 968 struct srp_target_port *target = ch->target;
aef9ec39
RD
969 int ret;
970
c014c8cd 971 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 972
509c07bc 973 ret = srp_lookup_path(ch);
aef9ec39 974 if (ret)
4d59ad29 975 goto out;
aef9ec39
RD
976
977 while (1) {
509c07bc 978 init_completion(&ch->done);
d92c0da7 979 ret = srp_send_req(ch, multich);
aef9ec39 980 if (ret)
4d59ad29 981 goto out;
509c07bc 982 ret = wait_for_completion_interruptible(&ch->done);
a702adce 983 if (ret < 0)
4d59ad29 984 goto out;
aef9ec39
RD
985
986 /*
987 * The CM event handling code will set status to
988 * SRP_PORT_REDIRECT if we get a port redirect REJ
989 * back, or SRP_DLID_REDIRECT if we get a lid/qp
990 * redirect REJ back.
991 */
4d59ad29
BVA
992 ret = ch->status;
993 switch (ret) {
aef9ec39 994 case 0:
c014c8cd 995 ch->connected = true;
4d59ad29 996 goto out;
aef9ec39
RD
997
998 case SRP_PORT_REDIRECT:
509c07bc 999 ret = srp_lookup_path(ch);
aef9ec39 1000 if (ret)
4d59ad29 1001 goto out;
aef9ec39
RD
1002 break;
1003
1004 case SRP_DLID_REDIRECT:
1005 break;
1006
9fe4bcf4 1007 case SRP_STALE_CONN:
9fe4bcf4 1008 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1009 "giving up on stale connection\n");
4d59ad29
BVA
1010 ret = -ECONNRESET;
1011 goto out;
9fe4bcf4 1012
aef9ec39 1013 default:
4d59ad29 1014 goto out;
aef9ec39
RD
1015 }
1016 }
4d59ad29
BVA
1017
1018out:
1019 return ret <= 0 ? ret : -ENODEV;
aef9ec39
RD
1020}
1021
1dc7b1f1
CH
1022static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1023{
1024 srp_handle_qp_err(cq, wc, "INV RKEY");
1025}
1026
1027static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1028 u32 rkey)
5cfb1782
BVA
1029{
1030 struct ib_send_wr *bad_wr;
1031 struct ib_send_wr wr = {
1032 .opcode = IB_WR_LOCAL_INV,
5cfb1782
BVA
1033 .next = NULL,
1034 .num_sge = 0,
1035 .send_flags = 0,
1036 .ex.invalidate_rkey = rkey,
1037 };
1038
1dc7b1f1
CH
1039 wr.wr_cqe = &req->reg_cqe;
1040 req->reg_cqe.done = srp_inv_rkey_err_done;
509c07bc 1041 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1042}
1043
d945e1df 1044static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1045 struct srp_rdma_ch *ch,
d945e1df
RD
1046 struct srp_request *req)
1047{
509c07bc 1048 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1049 struct srp_device *dev = target->srp_host->srp_dev;
1050 struct ib_device *ibdev = dev->dev;
1051 int i, res;
8f26c9ff 1052
bb350d1d 1053 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1054 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1055 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1056 return;
1057
5cfb1782
BVA
1058 if (dev->use_fast_reg) {
1059 struct srp_fr_desc **pfr;
1060
1061 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1dc7b1f1 1062 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1063 if (res < 0) {
1064 shost_printk(KERN_ERR, target->scsi_host, PFX
1065 "Queueing INV WR for rkey %#x failed (%d)\n",
1066 (*pfr)->mr->rkey, res);
1067 queue_work(system_long_wq,
1068 &target->tl_err_work);
1069 }
1070 }
1071 if (req->nmdesc)
509c07bc 1072 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782 1073 req->nmdesc);
002f1567 1074 } else if (dev->use_fmr) {
5cfb1782
BVA
1075 struct ib_pool_fmr **pfmr;
1076
1077 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1078 ib_fmr_pool_unmap(*pfmr);
1079 }
f5358a17 1080
8f26c9ff
DD
1081 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1082 scmnd->sc_data_direction);
d945e1df
RD
1083}
1084
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}
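
/*
 * Illustrative sketch (not part of the upstream driver): how an error path
 * can claim a specific SCSI command so that only one context completes it.
 * The real users, srp_finish_req() and srp_abort(), additionally return the
 * request-limit credit via srp_free_req(); that step is omitted here, and
 * the DID_ABORT result is an arbitrary example value.
 */
static void __maybe_unused srp_claim_example(struct srp_rdma_ch *ch,
					     struct srp_request *req,
					     struct scsi_cmnd *scmnd)
{
	/* srp_claim_req() returns scmnd only if we became its owner. */
	if (srp_claim_req(ch, req, NULL, scmnd) == scmnd) {
		srp_unmap_data(scmnd, ch, req);
		scmnd->result = DID_ABORT << 16;
		scmnd->scsi_done(scmnd);
	}
}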
1116
1117/**
6ec2ba02 1118 * srp_free_req() - Unmap data and adjust ch->req_lim.
509c07bc 1119 * @ch: SRP RDMA channel.
af24663b
BVA
1120 * @req: Request to be freed.
1121 * @scmnd: SCSI command associated with @req.
1122 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1123 */
509c07bc
BVA
1124static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1125 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1126{
94a9174c
BVA
1127 unsigned long flags;
1128
509c07bc 1129 srp_unmap_data(scmnd, ch, req);
22032991 1130
509c07bc
BVA
1131 spin_lock_irqsave(&ch->lock, flags);
1132 ch->req_lim += req_lim_delta;
509c07bc 1133 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1134}
1135
509c07bc
BVA
1136static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1137 struct scsi_device *sdev, int result)
526b4caa 1138{
509c07bc 1139 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1140
1141 if (scmnd) {
509c07bc 1142 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1143 scmnd->result = result;
22032991 1144 scmnd->scsi_done(scmnd);
22032991 1145 }
526b4caa
IR
1146}
1147
ed9b2264 1148static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1149{
ed9b2264 1150 struct srp_target_port *target = rport->lld_data;
d92c0da7 1151 struct srp_rdma_ch *ch;
b3fe628d
BVA
1152 struct Scsi_Host *shost = target->scsi_host;
1153 struct scsi_device *sdev;
d92c0da7 1154 int i, j;
ed9b2264 1155
b3fe628d
BVA
1156 /*
1157 * Invoking srp_terminate_io() while srp_queuecommand() is running
1158 * is not safe. Hence the warning statement below.
1159 */
1160 shost_for_each_device(sdev, shost)
1161 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1162
d92c0da7
BVA
1163 for (i = 0; i < target->ch_count; i++) {
1164 ch = &target->ch[i];
509c07bc 1165
d92c0da7
BVA
1166 for (j = 0; j < target->req_ring_size; ++j) {
1167 struct srp_request *req = &ch->req_ring[j];
1168
1169 srp_finish_req(ch, req, NULL,
1170 DID_TRANSPORT_FAILFAST << 16);
1171 }
ed9b2264
BVA
1172 }
1173}
aef9ec39 1174
ed9b2264
BVA
1175/*
1176 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1177 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1178 * srp_reset_device() or srp_reset_host() calls will occur while this function
1179 * is in progress. One way to realize that is not to call this function
1180 * directly but to call srp_reconnect_rport() instead since that last function
1181 * serializes calls of this function via rport->mutex and also blocks
1182 * srp_queuecommand() calls before invoking this function.
1183 */
1184static int srp_rport_reconnect(struct srp_rport *rport)
1185{
1186 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1187 struct srp_rdma_ch *ch;
1188 int i, j, ret = 0;
1189 bool multich = false;
09be70a2 1190
aef9ec39 1191 srp_disconnect_target(target);
34aa654e
BVA
1192
1193 if (target->state == SRP_TARGET_SCANNING)
1194 return -ENODEV;
1195
aef9ec39 1196 /*
c7c4e7ff
BVA
1197 * Now get a new local CM ID so that we avoid confusing the target in
1198 * case things are really fouled up. Doing so also ensures that all CM
1199 * callbacks will have finished before a new QP is allocated.
aef9ec39 1200 */
d92c0da7
BVA
1201 for (i = 0; i < target->ch_count; i++) {
1202 ch = &target->ch[i];
d92c0da7 1203 ret += srp_new_cm_id(ch);
536ae14e 1204 }
d92c0da7
BVA
1205 for (i = 0; i < target->ch_count; i++) {
1206 ch = &target->ch[i];
d92c0da7
BVA
1207 for (j = 0; j < target->req_ring_size; ++j) {
1208 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1209
d92c0da7
BVA
1210 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1211 }
1212 }
1213 for (i = 0; i < target->ch_count; i++) {
1214 ch = &target->ch[i];
d92c0da7
BVA
1215 /*
1216 * Whether or not creating a new CM ID succeeded, create a new
1217 * QP. This guarantees that all completion callback function
1218 * invocations have finished before request resetting starts.
1219 */
1220 ret += srp_create_ch_ib(ch);
aef9ec39 1221
d92c0da7
BVA
1222 INIT_LIST_HEAD(&ch->free_tx);
1223 for (j = 0; j < target->queue_size; ++j)
1224 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1225 }
8de9fe3a
BVA
1226
1227 target->qp_in_error = false;
1228
d92c0da7
BVA
1229 for (i = 0; i < target->ch_count; i++) {
1230 ch = &target->ch[i];
bbac5ccf 1231 if (ret)
d92c0da7 1232 break;
d92c0da7
BVA
1233 ret = srp_connect_ch(ch, multich);
1234 multich = true;
1235 }
09be70a2 1236
ed9b2264
BVA
1237 if (ret == 0)
1238 shost_printk(KERN_INFO, target->scsi_host,
1239 PFX "reconnect succeeded\n");
aef9ec39
RD
1240
1241 return ret;
1242}
1243
8f26c9ff
DD
1244static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1245 unsigned int dma_len, u32 rkey)
f5358a17 1246{
8f26c9ff 1247 struct srp_direct_buf *desc = state->desc;
f5358a17 1248
3ae95da8
BVA
1249 WARN_ON_ONCE(!dma_len);
1250
8f26c9ff
DD
1251 desc->va = cpu_to_be64(dma_addr);
1252 desc->key = cpu_to_be32(rkey);
1253 desc->len = cpu_to_be32(dma_len);
f5358a17 1254
8f26c9ff
DD
1255 state->total_len += dma_len;
1256 state->desc++;
1257 state->ndesc++;
1258}
559ce8f1 1259
8f26c9ff 1260static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1261 struct srp_rdma_ch *ch)
8f26c9ff 1262{
186fbc66
BVA
1263 struct srp_target_port *target = ch->target;
1264 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1265 struct ib_pool_fmr *fmr;
1266 u64 io_addr = 0;
85507bcc 1267
f731ed62
BVA
1268 if (state->fmr.next >= state->fmr.end)
1269 return -ENOMEM;
1270
26630e8a
SG
1271 WARN_ON_ONCE(!dev->use_fmr);
1272
1273 if (state->npages == 0)
1274 return 0;
1275
1276 if (state->npages == 1 && target->global_mr) {
1277 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1278 target->global_mr->rkey);
1279 goto reset_state;
1280 }
1281
509c07bc 1282 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1283 state->npages, io_addr);
1284 if (IS_ERR(fmr))
1285 return PTR_ERR(fmr);
f5358a17 1286
f731ed62 1287 *state->fmr.next++ = fmr;
52ede08f 1288 state->nmdesc++;
f5358a17 1289
186fbc66
BVA
1290 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1291 state->dma_len, fmr->fmr->rkey);
539dde6f 1292
26630e8a
SG
1293reset_state:
1294 state->npages = 0;
1295 state->dma_len = 0;
1296
8f26c9ff
DD
1297 return 0;
1298}
1299
1dc7b1f1
CH
1300static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1301{
1302 srp_handle_qp_err(cq, wc, "FAST REG");
1303}
1304
509c5f33
BVA
1305/*
1306 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1307 * where to start in the first element. If sg_offset_p != NULL then
1308 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1309 * byte that has not yet been mapped.
1310 */
5cfb1782 1311static int srp_map_finish_fr(struct srp_map_state *state,
1dc7b1f1 1312 struct srp_request *req,
509c5f33
BVA
1313 struct srp_rdma_ch *ch, int sg_nents,
1314 unsigned int *sg_offset_p)
5cfb1782 1315{
509c07bc 1316 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1317 struct srp_device *dev = target->srp_host->srp_dev;
1318 struct ib_send_wr *bad_wr;
f7f7aab1 1319 struct ib_reg_wr wr;
5cfb1782
BVA
1320 struct srp_fr_desc *desc;
1321 u32 rkey;
f7f7aab1 1322 int n, err;
5cfb1782 1323
f731ed62
BVA
1324 if (state->fr.next >= state->fr.end)
1325 return -ENOMEM;
1326
26630e8a
SG
1327 WARN_ON_ONCE(!dev->use_fast_reg);
1328
57b0be9c 1329 if (sg_nents == 1 && target->global_mr) {
509c5f33
BVA
1330 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1331
1332 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1333 sg_dma_len(state->sg) - sg_offset,
26630e8a 1334 target->global_mr->rkey);
509c5f33
BVA
1335 if (sg_offset_p)
1336 *sg_offset_p = 0;
f7f7aab1 1337 return 1;
26630e8a
SG
1338 }
1339
509c07bc 1340 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1341 if (!desc)
1342 return -ENOMEM;
1343
1344 rkey = ib_inc_rkey(desc->mr->rkey);
1345 ib_update_fast_reg_key(desc->mr, rkey);
1346
509c5f33
BVA
1347 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1348 dev->mr_page_size);
9d8e7d0d
BVA
1349 if (unlikely(n < 0)) {
1350 srp_fr_pool_put(ch->fr_pool, &desc, 1);
509c5f33 1351 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
9d8e7d0d 1352 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
509c5f33 1353 sg_offset_p ? *sg_offset_p : -1, n);
f7f7aab1 1354 return n;
9d8e7d0d 1355 }
5cfb1782 1356
509c5f33 1357 WARN_ON_ONCE(desc->mr->length == 0);
5cfb1782 1358
1dc7b1f1
CH
1359 req->reg_cqe.done = srp_reg_mr_err_done;
1360
f7f7aab1
SG
1361 wr.wr.next = NULL;
1362 wr.wr.opcode = IB_WR_REG_MR;
1dc7b1f1 1363 wr.wr.wr_cqe = &req->reg_cqe;
f7f7aab1
SG
1364 wr.wr.num_sge = 0;
1365 wr.wr.send_flags = 0;
1366 wr.mr = desc->mr;
1367 wr.key = desc->mr->rkey;
1368 wr.access = (IB_ACCESS_LOCAL_WRITE |
1369 IB_ACCESS_REMOTE_READ |
1370 IB_ACCESS_REMOTE_WRITE);
5cfb1782 1371
f731ed62 1372 *state->fr.next++ = desc;
5cfb1782
BVA
1373 state->nmdesc++;
1374
f7f7aab1
SG
1375 srp_map_desc(state, desc->mr->iova,
1376 desc->mr->length, desc->mr->rkey);
5cfb1782 1377
26630e8a 1378 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
509c5f33
BVA
1379 if (unlikely(err)) {
1380 WARN_ON_ONCE(err == -ENOMEM);
26630e8a 1381 return err;
509c5f33 1382 }
26630e8a 1383
f7f7aab1 1384 return n;
5cfb1782
BVA
1385}
1386
8f26c9ff 1387static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1388 struct srp_rdma_ch *ch,
3ae95da8 1389 struct scatterlist *sg, int sg_index)
8f26c9ff 1390{
509c07bc 1391 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1392 struct srp_device *dev = target->srp_host->srp_dev;
1393 struct ib_device *ibdev = dev->dev;
1394 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1395 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3ae95da8 1396 unsigned int len = 0;
8f26c9ff
DD
1397 int ret;
1398
3ae95da8 1399 WARN_ON_ONCE(!dma_len);
f5358a17 1400
8f26c9ff 1401 while (dma_len) {
5cfb1782
BVA
1402 unsigned offset = dma_addr & ~dev->mr_page_mask;
1403 if (state->npages == dev->max_pages_per_mr || offset != 0) {
f7f7aab1 1404 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1405 if (ret)
1406 return ret;
8f26c9ff
DD
1407 }
1408
5cfb1782 1409 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1410
8f26c9ff
DD
1411 if (!state->npages)
1412 state->base_dma_addr = dma_addr;
5cfb1782 1413 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1414 state->dma_len += len;
8f26c9ff
DD
1415 dma_addr += len;
1416 dma_len -= len;
1417 }
1418
5cfb1782
BVA
1419 /*
1420 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff 1421 * close it out and start a new one -- we can only merge at page
1d3d98c4 1422 * boundaries.
8f26c9ff
DD
1423 */
1424 ret = 0;
0e0d3a48 1425 if (len != dev->mr_page_size)
f7f7aab1 1426 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1427 return ret;
1428}
1429
26630e8a
SG
1430static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1431 struct srp_request *req, struct scatterlist *scat,
1432 int count)
76bc1e1d 1433{
76bc1e1d 1434 struct scatterlist *sg;
0e0d3a48 1435 int i, ret;
76bc1e1d 1436
26630e8a
SG
1437 state->pages = req->map_page;
1438 state->fmr.next = req->fmr_list;
509c5f33 1439 state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
26630e8a
SG
1440
1441 for_each_sg(scat, sg, count, i) {
1442 ret = srp_map_sg_entry(state, ch, sg, i);
1443 if (ret)
1444 return ret;
5cfb1782 1445 }
76bc1e1d 1446
f7f7aab1 1447 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1448 if (ret)
1449 return ret;
1450
26630e8a
SG
1451 return 0;
1452}
1453
1454static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1455 struct srp_request *req, struct scatterlist *scat,
1456 int count)
1457{
509c5f33
BVA
1458 unsigned int sg_offset = 0;
1459
26630e8a 1460 state->desc = req->indirect_desc;
f7f7aab1 1461 state->fr.next = req->fr_list;
509c5f33 1462 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
f7f7aab1 1463 state->sg = scat;
26630e8a 1464
3b59b7a6
BVA
1465 if (count == 0)
1466 return 0;
1467
57b0be9c 1468 while (count) {
f7f7aab1 1469 int i, n;
26630e8a 1470
509c5f33 1471 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
f7f7aab1
SG
1472 if (unlikely(n < 0))
1473 return n;
1474
57b0be9c 1475 count -= n;
f7f7aab1
SG
1476 for (i = 0; i < n; i++)
1477 state->sg = sg_next(state->sg);
1478 }
26630e8a 1479
26630e8a
SG
1480 return 0;
1481}
1482
1483static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1484 struct srp_request *req, struct scatterlist *scat,
1485 int count)
1486{
1487 struct srp_target_port *target = ch->target;
1488 struct srp_device *dev = target->srp_host->srp_dev;
1489 struct scatterlist *sg;
1490 int i;
1491
1492 state->desc = req->indirect_desc;
1493 for_each_sg(scat, sg, count, i) {
1494 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1495 ib_sg_dma_len(dev->dev, sg),
1496 target->global_mr->rkey);
0e0d3a48 1497 }
76bc1e1d 1498
26630e8a 1499 return 0;
76bc1e1d
BVA
1500}
1501
330179f2
BVA
1502/*
1503 * Register the indirect data buffer descriptor with the HCA.
1504 *
1505 * Note: since the indirect data buffer descriptor has been allocated with
1506 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1507 * memory buffer.
1508 */
1509static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1510 void **next_mr, void **end_mr, u32 idb_len,
1511 __be32 *idb_rkey)
1512{
1513 struct srp_target_port *target = ch->target;
1514 struct srp_device *dev = target->srp_host->srp_dev;
1515 struct srp_map_state state;
1516 struct srp_direct_buf idb_desc;
1517 u64 idb_pages[1];
f7f7aab1 1518 struct scatterlist idb_sg[1];
330179f2
BVA
1519 int ret;
1520
1521 memset(&state, 0, sizeof(state));
1522 memset(&idb_desc, 0, sizeof(idb_desc));
1523 state.gen.next = next_mr;
1524 state.gen.end = end_mr;
1525 state.desc = &idb_desc;
330179f2
BVA
1526 state.base_dma_addr = req->indirect_dma_addr;
1527 state.dma_len = idb_len;
f7f7aab1
SG
1528
1529 if (dev->use_fast_reg) {
1530 state.sg = idb_sg;
54f5c9c5 1531 sg_init_one(idb_sg, req->indirect_desc, idb_len);
f7f7aab1 1532 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
fc925518
CH
1533#ifdef CONFIG_NEED_SG_DMA_LENGTH
1534 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1535#endif
509c5f33 1536 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
f7f7aab1
SG
1537 if (ret < 0)
1538 return ret;
509c5f33 1539 WARN_ON_ONCE(ret < 1);
f7f7aab1
SG
1540 } else if (dev->use_fmr) {
1541 state.pages = idb_pages;
1542 state.pages[0] = (req->indirect_dma_addr &
1543 dev->mr_page_mask);
1544 state.npages = 1;
1545 ret = srp_map_finish_fmr(&state, ch);
1546 if (ret < 0)
1547 return ret;
1548 } else {
1549 return -EINVAL;
1550 }
330179f2
BVA
1551
1552 *idb_rkey = idb_desc.key;
1553
f7f7aab1 1554 return 0;
330179f2
BVA
1555}
1556
509c5f33
BVA
1557#if defined(DYNAMIC_DATA_DEBUG)
1558static void srp_check_mapping(struct srp_map_state *state,
1559 struct srp_rdma_ch *ch, struct srp_request *req,
1560 struct scatterlist *scat, int count)
1561{
1562 struct srp_device *dev = ch->target->srp_host->srp_dev;
1563 struct srp_fr_desc **pfr;
1564 u64 desc_len = 0, mr_len = 0;
1565 int i;
1566
1567 for (i = 0; i < state->ndesc; i++)
1568 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1569 if (dev->use_fast_reg)
1570 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1571 mr_len += (*pfr)->mr->length;
1572 else if (dev->use_fmr)
1573 for (i = 0; i < state->nmdesc; i++)
1574 mr_len += be32_to_cpu(req->indirect_desc[i].len);
1575 if (desc_len != scsi_bufflen(req->scmnd) ||
1576 mr_len > scsi_bufflen(req->scmnd))
1577 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1578 scsi_bufflen(req->scmnd), desc_len, mr_len,
1579 state->ndesc, state->nmdesc);
1580}
1581#endif
1582
77269cdf
BVA
1583/**
1584 * srp_map_data() - map SCSI data buffer onto an SRP request
1585 * @scmnd: SCSI command to map
1586 * @ch: SRP RDMA channel
1587 * @req: SRP request
1588 *
1589 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1590 * mapping failed.
1591 */
509c07bc 1592static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1593 struct srp_request *req)
1594{
509c07bc 1595 struct srp_target_port *target = ch->target;
76bc1e1d 1596 struct scatterlist *scat;
aef9ec39 1597 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1598 int len, nents, count, ret;
85507bcc
RC
1599 struct srp_device *dev;
1600 struct ib_device *ibdev;
8f26c9ff
DD
1601 struct srp_map_state state;
1602 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1603 u32 idb_len, table_len;
1604 __be32 idb_rkey;
8f26c9ff 1605 u8 fmt;
aef9ec39 1606
bb350d1d 1607 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1608 return sizeof (struct srp_cmd);
1609
1610 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1611 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1612 shost_printk(KERN_WARNING, target->scsi_host,
1613 PFX "Unhandled data direction %d\n",
1614 scmnd->sc_data_direction);
aef9ec39
RD
1615 return -EINVAL;
1616 }
1617
bb350d1d
FT
1618 nents = scsi_sg_count(scmnd);
1619 scat = scsi_sglist(scmnd);
aef9ec39 1620
05321937 1621 dev = target->srp_host->srp_dev;
85507bcc
RC
1622 ibdev = dev->dev;
1623
1624 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1625 if (unlikely(count == 0))
1626 return -EIO;
f5358a17
RD
1627
1628 fmt = SRP_DATA_DESC_DIRECT;
1629 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1630
03f6fb93 1631 if (count == 1 && target->global_mr) {
f5358a17
RD
1632 /*
1633 * The midlayer only generated a single gather/scatter
1634 * entry, or DMA mapping coalesced everything to a
1635 * single entry. So a direct descriptor along with
1636 * the DMA MR suffices.
1637 */
cf368713 1638 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1639
85507bcc 1640 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
03f6fb93 1641 buf->key = cpu_to_be32(target->global_mr->rkey);
85507bcc 1642 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1643
52ede08f 1644 req->nmdesc = 0;
8f26c9ff
DD
1645 goto map_complete;
1646 }
1647
5cfb1782
BVA
1648 /*
1649 * We have more than one scatter/gather entry, so build our indirect
1650 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1651 */
1652 indirect_hdr = (void *) cmd->add_data;
1653
c07d424d
DD
1654 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1655 target->indirect_size, DMA_TO_DEVICE);
1656
8f26c9ff 1657 memset(&state, 0, sizeof(state));
26630e8a 1658 if (dev->use_fast_reg)
e012f363 1659 ret = srp_map_sg_fr(&state, ch, req, scat, count);
26630e8a 1660 else if (dev->use_fmr)
e012f363 1661 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
26630e8a 1662 else
e012f363
BVA
1663 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1664 req->nmdesc = state.nmdesc;
1665 if (ret < 0)
1666 goto unmap;
cf368713 1667
509c5f33
BVA
1668#if defined(DYNAMIC_DEBUG)
1669 {
1670 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1671 "Memory mapping consistency check");
1672 if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
1673 srp_check_mapping(&state, ch, req, scat, count);
1674 }
1675#endif
cf368713 1676
c07d424d
DD
1677 /* We've mapped the request, now pull as much of the indirect
1678 * descriptor table as we can into the command buffer. If this
1679 * target is not using an external indirect table, we are
1680 * guaranteed to fit into the command, as the SCSI layer won't
1681 * give us more S/G entries than we allow.
8f26c9ff 1682 */
8f26c9ff 1683 if (state.ndesc == 1) {
5cfb1782
BVA
1684 /*
1685 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1686 * so use a direct descriptor.
1687 */
1688 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1689
c07d424d 1690 *buf = req->indirect_desc[0];
8f26c9ff 1691 goto map_complete;
aef9ec39
RD
1692 }
1693
c07d424d
DD
1694 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1695 !target->allow_ext_sg)) {
1696 shost_printk(KERN_ERR, target->scsi_host,
1697 "Could not fit S/G list into SRP_CMD\n");
e012f363
BVA
1698 ret = -EIO;
1699 goto unmap;
c07d424d
DD
1700 }
1701
1702 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1703 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1704 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1705
1706 fmt = SRP_DATA_DESC_INDIRECT;
1707 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1708 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1709
c07d424d
DD
1710 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1711 count * sizeof (struct srp_direct_buf));
8f26c9ff 1712
03f6fb93 1713 if (!target->global_mr) {
330179f2
BVA
1714 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1715 idb_len, &idb_rkey);
1716 if (ret < 0)
e012f363 1717 goto unmap;
330179f2
BVA
1718 req->nmdesc++;
1719 } else {
a745f4f4 1720 idb_rkey = cpu_to_be32(target->global_mr->rkey);
330179f2
BVA
1721 }
1722
c07d424d 1723 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1724 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1725 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1726 indirect_hdr->len = cpu_to_be32(state.total_len);
1727
1728 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1729 cmd->data_out_desc_cnt = count;
8f26c9ff 1730 else
c07d424d
DD
1731 cmd->data_in_desc_cnt = count;
1732
1733 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1734 DMA_TO_DEVICE);
8f26c9ff
DD
1735
1736map_complete:
aef9ec39
RD
1737 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1738 cmd->buf_fmt = fmt << 4;
1739 else
1740 cmd->buf_fmt = fmt;
1741
aef9ec39 1742 return len;
e012f363
BVA
1743
1744unmap:
1745 srp_unmap_data(scmnd, ch, req);
ffc548bb
BVA
1746 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1747 ret = -E2BIG;
e012f363 1748 return ret;
aef9ec39
RD
1749}
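/*
 * Minimal sketch (illustrative only, not part of the driver source):
 * srp_map_data() above embeds either a single srp_direct_buf or an
 * srp_indirect_buf table in the SRP_CMD.  The helper below shows how a
 * direct descriptor is filled; the dma_addr/dma_len/rkey parameters are
 * assumed to come from the ib_dma_map_sg() call and memory registration
 * performed earlier in this file.
 */
static void example_fill_direct_desc(struct srp_direct_buf *buf,
				     u64 dma_addr, u32 dma_len, u32 rkey)
{
	buf->va  = cpu_to_be64(dma_addr);	/* bus address of the data buffer */
	buf->key = cpu_to_be32(rkey);		/* rkey the target uses for RDMA */
	buf->len = cpu_to_be32(dma_len);	/* transfer length in bytes */
}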
1750
76c75b25
BVA
1751/*
1752 * Return an IU and possible credit to the free pool
1753 */
509c07bc 1754static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1755 enum srp_iu_type iu_type)
1756{
1757 unsigned long flags;
1758
509c07bc
BVA
1759 spin_lock_irqsave(&ch->lock, flags);
1760 list_add(&iu->list, &ch->free_tx);
76c75b25 1761 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1762 ++ch->req_lim;
1763 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1764}
1765
05a1d750 1766/*
509c07bc 1767 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1768 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1769 *
1770 * Note:
1771 * An upper limit for the number of allocated information units for each
1772 * request type is:
1773 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1774 * more than Scsi_Host.can_queue requests.
1775 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1776 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1777 * one unanswered SRP request to an initiator.
1778 */
509c07bc 1779static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1780 enum srp_iu_type iu_type)
1781{
509c07bc 1782 struct srp_target_port *target = ch->target;
05a1d750
DD
1783 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1784 struct srp_iu *iu;
1785
1dc7b1f1 1786 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 1787
509c07bc 1788 if (list_empty(&ch->free_tx))
05a1d750
DD
1789 return NULL;
1790
1791 /* Initiator responses to target requests do not consume credits */
76c75b25 1792 if (iu_type != SRP_IU_RSP) {
509c07bc 1793 if (ch->req_lim <= rsv) {
76c75b25
BVA
1794 ++target->zero_req_lim;
1795 return NULL;
1796 }
1797
509c07bc 1798 --ch->req_lim;
05a1d750
DD
1799 }
1800
509c07bc 1801 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1802 list_del(&iu->list);
05a1d750
DD
1803 return iu;
1804}
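/*
 * Illustrative helper (a sketch, not used by the driver): for regular
 * command IUs the credit check in __srp_get_tx_iu() above reduces to the
 * test below, i.e. the last SRP_TSK_MGMT_SQ_SIZE credits are held back so
 * that a task management function can always be sent.
 */
static bool example_cmd_credit_available(s32 req_lim)
{
	return req_lim > SRP_TSK_MGMT_SQ_SIZE;
}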
1805
1dc7b1f1
CH
1806static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1807{
1808 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1809 struct srp_rdma_ch *ch = cq->cq_context;
1810
1811 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1812 srp_handle_qp_err(cq, wc, "SEND");
1813 return;
1814 }
1815
1816 list_add(&iu->list, &ch->free_tx);
1817}
1818
509c07bc 1819static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1820{
509c07bc 1821 struct srp_target_port *target = ch->target;
05a1d750
DD
1822 struct ib_sge list;
1823 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1824
1825 list.addr = iu->dma;
1826 list.length = len;
9af76271 1827 list.lkey = target->lkey;
05a1d750 1828
1dc7b1f1
CH
1829 iu->cqe.done = srp_send_done;
1830
05a1d750 1831 wr.next = NULL;
1dc7b1f1 1832 wr.wr_cqe = &iu->cqe;
05a1d750
DD
1833 wr.sg_list = &list;
1834 wr.num_sge = 1;
1835 wr.opcode = IB_WR_SEND;
1836 wr.send_flags = IB_SEND_SIGNALED;
1837
509c07bc 1838 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1839}
1840
509c07bc 1841static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1842{
509c07bc 1843 struct srp_target_port *target = ch->target;
c996bb47 1844 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1845 struct ib_sge list;
c996bb47
BVA
1846
1847 list.addr = iu->dma;
1848 list.length = iu->size;
9af76271 1849 list.lkey = target->lkey;
c996bb47 1850
1dc7b1f1
CH
1851 iu->cqe.done = srp_recv_done;
1852
c996bb47 1853 wr.next = NULL;
1dc7b1f1 1854 wr.wr_cqe = &iu->cqe;
c996bb47
BVA
1855 wr.sg_list = &list;
1856 wr.num_sge = 1;
1857
509c07bc 1858 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1859}
1860
509c07bc 1861static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1862{
509c07bc 1863 struct srp_target_port *target = ch->target;
aef9ec39
RD
1864 struct srp_request *req;
1865 struct scsi_cmnd *scmnd;
1866 unsigned long flags;
aef9ec39 1867
aef9ec39 1868 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1869 spin_lock_irqsave(&ch->lock, flags);
1870 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1871 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1872
509c07bc 1873 ch->tsk_mgmt_status = -1;
f8b6e31e 1874 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1875 ch->tsk_mgmt_status = rsp->data[3];
1876 complete(&ch->tsk_mgmt_done);
aef9ec39 1877 } else {
77f2c1a4
BVA
1878 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1879 if (scmnd) {
1880 req = (void *)scmnd->host_scribble;
1881 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1882 }
22032991 1883 if (!scmnd) {
7aa54bd7 1884 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1885 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1886 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1887
509c07bc
BVA
1888 spin_lock_irqsave(&ch->lock, flags);
1889 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1890 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1891
1892 return;
1893 }
aef9ec39
RD
1894 scmnd->result = rsp->status;
1895
1896 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1897 memcpy(scmnd->sense_buffer, rsp->data +
1898 be32_to_cpu(rsp->resp_data_len),
1899 min_t(int, be32_to_cpu(rsp->sense_data_len),
1900 SCSI_SENSE_BUFFERSIZE));
1901 }
1902
e714531a 1903 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1904 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1905 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1906 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1907 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1908 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1909 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1910 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1911
509c07bc 1912 srp_free_req(ch, req, scmnd,
22032991
BVA
1913 be32_to_cpu(rsp->req_lim_delta));
1914
f8b6e31e
DD
1915 scmnd->host_scribble = NULL;
1916 scmnd->scsi_done(scmnd);
aef9ec39 1917 }
aef9ec39
RD
1918}
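/*
 * Residual handling example (illustrative numbers): for an 8192-byte READ
 * of which the target only returned 4096 bytes, the SRP response carries
 * SRP_RSP_FLAG_DIUNDER with data_in_res_cnt = 4096, so the code above
 * reports a positive residual of 4096 bytes via scsi_set_resid(); the
 * *OVER flags signal the opposite case and are reported as a negative
 * residual.
 */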
1919
509c07bc 1920static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1921 void *rsp, int len)
1922{
509c07bc 1923 struct srp_target_port *target = ch->target;
76c75b25 1924 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1925 unsigned long flags;
1926 struct srp_iu *iu;
76c75b25 1927 int err;
bb12588a 1928
509c07bc
BVA
1929 spin_lock_irqsave(&ch->lock, flags);
1930 ch->req_lim += req_delta;
1931 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1932 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1933
bb12588a
DD
1934 if (!iu) {
1935 shost_printk(KERN_ERR, target->scsi_host, PFX
1936 "no IU available to send response\n");
76c75b25 1937 return 1;
bb12588a
DD
1938 }
1939
1940 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1941 memcpy(iu->buf, rsp, len);
1942 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1943
509c07bc 1944 err = srp_post_send(ch, iu, len);
76c75b25 1945 if (err) {
bb12588a
DD
1946 shost_printk(KERN_ERR, target->scsi_host, PFX
1947 "unable to post response: %d\n", err);
509c07bc 1948 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1949 }
bb12588a 1950
bb12588a
DD
1951 return err;
1952}
1953
509c07bc 1954static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1955 struct srp_cred_req *req)
1956{
1957 struct srp_cred_rsp rsp = {
1958 .opcode = SRP_CRED_RSP,
1959 .tag = req->tag,
1960 };
1961 s32 delta = be32_to_cpu(req->req_lim_delta);
1962
509c07bc
BVA
1963 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1964 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1965 "problems processing SRP_CRED_REQ\n");
1966}
1967
509c07bc 1968static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1969 struct srp_aer_req *req)
1970{
509c07bc 1971 struct srp_target_port *target = ch->target;
bb12588a
DD
1972 struct srp_aer_rsp rsp = {
1973 .opcode = SRP_AER_RSP,
1974 .tag = req->tag,
1975 };
1976 s32 delta = be32_to_cpu(req->req_lim_delta);
1977
1978 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1979 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1980
509c07bc 1981 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1982 shost_printk(KERN_ERR, target->scsi_host, PFX
1983 "problems processing SRP_AER_REQ\n");
1984}
1985
1dc7b1f1 1986static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 1987{
1dc7b1f1
CH
1988 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1989 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 1990 struct srp_target_port *target = ch->target;
dcb4cb85 1991 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 1992 int res;
aef9ec39
RD
1993 u8 opcode;
1994
1dc7b1f1
CH
1995 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1996 srp_handle_qp_err(cq, wc, "RECV");
1997 return;
1998 }
1999
509c07bc 2000 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2001 DMA_FROM_DEVICE);
aef9ec39
RD
2002
2003 opcode = *(u8 *) iu->buf;
2004
2005 if (0) {
7aa54bd7
DD
2006 shost_printk(KERN_ERR, target->scsi_host,
2007 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
2008 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2009 iu->buf, wc->byte_len, true);
aef9ec39
RD
2010 }
2011
2012 switch (opcode) {
2013 case SRP_RSP:
509c07bc 2014 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
2015 break;
2016
bb12588a 2017 case SRP_CRED_REQ:
509c07bc 2018 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
2019 break;
2020
2021 case SRP_AER_REQ:
509c07bc 2022 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
2023 break;
2024
aef9ec39
RD
2025 case SRP_T_LOGOUT:
2026 /* XXX Handle target logout */
7aa54bd7
DD
2027 shost_printk(KERN_WARNING, target->scsi_host,
2028 PFX "Got target logout request\n");
aef9ec39
RD
2029 break;
2030
2031 default:
7aa54bd7
DD
2032 shost_printk(KERN_WARNING, target->scsi_host,
2033 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
2034 break;
2035 }
2036
509c07bc 2037 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2038 DMA_FROM_DEVICE);
c996bb47 2039
509c07bc 2040 res = srp_post_recv(ch, iu);
c996bb47
BVA
2041 if (res != 0)
2042 shost_printk(KERN_ERR, target->scsi_host,
2043 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
2044}
2045
c1120f89
BVA
2046/**
2047 * srp_tl_err_work() - handle a transport layer error
af24663b 2048 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
2049 *
2050 * Note: This function may get invoked before the rport has been created,
2051 * hence the target->rport test.
2052 */
2053static void srp_tl_err_work(struct work_struct *work)
2054{
2055 struct srp_target_port *target;
2056
2057 target = container_of(work, struct srp_target_port, tl_err_work);
2058 if (target->rport)
2059 srp_start_tl_fail_timers(target->rport);
2060}
2061
1dc7b1f1
CH
2062static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2063 const char *opname)
948d1e88 2064{
1dc7b1f1 2065 struct srp_rdma_ch *ch = cq->cq_context;
7dad6b2e
BVA
2066 struct srp_target_port *target = ch->target;
2067
c014c8cd 2068 if (ch->connected && !target->qp_in_error) {
1dc7b1f1
CH
2069 shost_printk(KERN_ERR, target->scsi_host,
2070 PFX "failed %s status %s (%d) for CQE %p\n",
2071 opname, ib_wc_status_msg(wc->status), wc->status,
2072 wc->wr_cqe);
c1120f89 2073 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2074 }
948d1e88
BVA
2075 target->qp_in_error = true;
2076}
2077
76c75b25 2078static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2079{
76c75b25 2080 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2081 struct srp_rport *rport = target->rport;
509c07bc 2082 struct srp_rdma_ch *ch;
aef9ec39
RD
2083 struct srp_request *req;
2084 struct srp_iu *iu;
2085 struct srp_cmd *cmd;
85507bcc 2086 struct ib_device *dev;
76c75b25 2087 unsigned long flags;
77f2c1a4
BVA
2088 u32 tag;
2089 u16 idx;
d1b4289e 2090 int len, ret;
a95cadb9
BVA
2091 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2092
2093 /*
2094 * The SCSI EH thread is the only context from which srp_queuecommand()
2095 * can get invoked for blocked devices (SDEV_BLOCK /
2096 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2097 * locking the rport mutex if invoked from inside the SCSI EH.
2098 */
2099 if (in_scsi_eh)
2100 mutex_lock(&rport->mutex);
aef9ec39 2101
d1b4289e
BVA
2102 scmnd->result = srp_chkready(target->rport);
2103 if (unlikely(scmnd->result))
2104 goto err;
2ce19e72 2105
77f2c1a4
BVA
2106 WARN_ON_ONCE(scmnd->request->tag < 0);
2107 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2108 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2109 idx = blk_mq_unique_tag_to_tag(tag);
2110 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2111 dev_name(&shost->shost_gendev), tag, idx,
2112 target->req_ring_size);
509c07bc
BVA
2113
2114 spin_lock_irqsave(&ch->lock, flags);
2115 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2116 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2117
77f2c1a4
BVA
2118 if (!iu)
2119 goto err;
2120
2121 req = &ch->req_ring[idx];
05321937 2122 dev = target->srp_host->srp_dev->dev;
49248644 2123 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2124 DMA_TO_DEVICE);
aef9ec39 2125
f8b6e31e 2126 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2127
2128 cmd = iu->buf;
2129 memset(cmd, 0, sizeof *cmd);
2130
2131 cmd->opcode = SRP_CMD;
985aa495 2132 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2133 cmd->tag = tag;
aef9ec39
RD
2134 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2135
aef9ec39
RD
2136 req->scmnd = scmnd;
2137 req->cmd = iu;
aef9ec39 2138
509c07bc 2139 len = srp_map_data(scmnd, ch, req);
aef9ec39 2140 if (len < 0) {
7aa54bd7 2141 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2142 PFX "Failed to map data (%d)\n", len);
2143 /*
2144 * If we ran out of memory descriptors (-ENOMEM) because an
2145 * application is queuing many requests with more than
52ede08f 2146 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2147 * to reduce queue depth temporarily.
2148 */
2149 scmnd->result = len == -ENOMEM ?
2150 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2151 goto err_iu;
aef9ec39
RD
2152 }
2153
49248644 2154 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2155 DMA_TO_DEVICE);
aef9ec39 2156
509c07bc 2157 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2158 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2159 goto err_unmap;
2160 }
2161
d1b4289e
BVA
2162 ret = 0;
2163
a95cadb9
BVA
2164unlock_rport:
2165 if (in_scsi_eh)
2166 mutex_unlock(&rport->mutex);
2167
d1b4289e 2168 return ret;
aef9ec39
RD
2169
2170err_unmap:
509c07bc 2171 srp_unmap_data(scmnd, ch, req);
aef9ec39 2172
76c75b25 2173err_iu:
509c07bc 2174 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2175
024ca901
BVA
2176 /*
 2177 * Prevent the loops that iterate over the request ring from
 2178 * encountering a dangling SCSI command pointer.
2179 */
2180 req->scmnd = NULL;
2181
d1b4289e
BVA
2182err:
2183 if (scmnd->result) {
2184 scmnd->scsi_done(scmnd);
2185 ret = 0;
2186 } else {
2187 ret = SCSI_MLQUEUE_HOST_BUSY;
2188 }
a95cadb9 2189
d1b4289e 2190 goto unlock_rport;
aef9ec39
RD
2191}
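/*
 * Sketch (not part of the driver) of how srp_queuecommand() above maps a
 * blk-mq tag onto an RDMA channel and a slot in that channel's request
 * ring: the hardware-queue part of the unique tag selects the channel and
 * the per-queue tag selects the srp_request.
 */
static struct srp_request *example_tag_to_req(struct srp_target_port *target,
					      struct scsi_cmnd *scmnd)
{
	u32 tag = blk_mq_unique_tag(scmnd->request);
	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];

	return &ch->req_ring[blk_mq_unique_tag_to_tag(tag)];
}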
2192
4d73f95f
BVA
2193/*
2194 * Note: the resources allocated in this function are freed in
509c07bc 2195 * srp_free_ch_ib().
4d73f95f 2196 */
509c07bc 2197static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2198{
509c07bc 2199 struct srp_target_port *target = ch->target;
aef9ec39
RD
2200 int i;
2201
509c07bc
BVA
2202 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2203 GFP_KERNEL);
2204 if (!ch->rx_ring)
4d73f95f 2205 goto err_no_ring;
509c07bc
BVA
2206 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2207 GFP_KERNEL);
2208 if (!ch->tx_ring)
4d73f95f
BVA
2209 goto err_no_ring;
2210
2211 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2212 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2213 ch->max_ti_iu_len,
2214 GFP_KERNEL, DMA_FROM_DEVICE);
2215 if (!ch->rx_ring[i])
aef9ec39
RD
2216 goto err;
2217 }
2218
4d73f95f 2219 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2220 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2221 target->max_iu_len,
2222 GFP_KERNEL, DMA_TO_DEVICE);
2223 if (!ch->tx_ring[i])
aef9ec39 2224 goto err;
dcb4cb85 2225
509c07bc 2226 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2227 }
2228
2229 return 0;
2230
2231err:
4d73f95f 2232 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2233 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2234 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2235 }
2236
4d73f95f
BVA
2237
2238err_no_ring:
509c07bc
BVA
2239 kfree(ch->tx_ring);
2240 ch->tx_ring = NULL;
2241 kfree(ch->rx_ring);
2242 ch->rx_ring = NULL;
4d73f95f 2243
aef9ec39
RD
2244 return -ENOMEM;
2245}
2246
c9b03c1a
BVA
2247static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2248{
2249 uint64_t T_tr_ns, max_compl_time_ms;
2250 uint32_t rq_tmo_jiffies;
2251
2252 /*
2253 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2254 * table 91), both the QP timeout and the retry count have to be set
2255 * for RC QP's during the RTR to RTS transition.
2256 */
2257 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2258 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2259
2260 /*
2261 * Set target->rq_tmo_jiffies to one second more than the largest time
2262 * it can take before an error completion is generated. See also
2263 * C9-140..142 in the IBTA spec for more information about how to
2264 * convert the QP Local ACK Timeout value to nanoseconds.
2265 */
2266 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2267 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2268 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2269 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2270
2271 return rq_tmo_jiffies;
2272}
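/*
 * Worked example for srp_compute_rq_tmo() (values assumed, not taken from
 * a real HCA): with qp_attr->timeout = 19 and qp_attr->retry_cnt = 7,
 * T_tr = 4096 * 2^19 ns ~= 2.15 s, the worst-case completion time is
 * 7 * 4 * 2.15 s ~= 60 s, and rq_tmo_jiffies therefore corresponds to
 * roughly 61 seconds (one second of slack on top of the worst case).
 */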
2273
961e0be8 2274static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2275 const struct srp_login_rsp *lrsp,
509c07bc 2276 struct srp_rdma_ch *ch)
961e0be8 2277{
509c07bc 2278 struct srp_target_port *target = ch->target;
961e0be8
DD
2279 struct ib_qp_attr *qp_attr = NULL;
2280 int attr_mask = 0;
2281 int ret;
2282 int i;
2283
2284 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2285 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2286 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2287
2288 /*
2289 * Reserve credits for task management so we don't
2290 * bounce requests back to the SCSI mid-layer.
2291 */
2292 target->scsi_host->can_queue
509c07bc 2293 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2294 target->scsi_host->can_queue);
4d73f95f
BVA
2295 target->scsi_host->cmd_per_lun
2296 = min_t(int, target->scsi_host->can_queue,
2297 target->scsi_host->cmd_per_lun);
961e0be8
DD
2298 } else {
2299 shost_printk(KERN_WARNING, target->scsi_host,
2300 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2301 ret = -ECONNRESET;
2302 goto error;
2303 }
2304
509c07bc
BVA
2305 if (!ch->rx_ring) {
2306 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2307 if (ret)
2308 goto error;
2309 }
2310
2311 ret = -ENOMEM;
2312 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2313 if (!qp_attr)
2314 goto error;
2315
2316 qp_attr->qp_state = IB_QPS_RTR;
2317 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2318 if (ret)
2319 goto error_free;
2320
509c07bc 2321 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2322 if (ret)
2323 goto error_free;
2324
4d73f95f 2325 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2326 struct srp_iu *iu = ch->rx_ring[i];
2327
2328 ret = srp_post_recv(ch, iu);
961e0be8
DD
2329 if (ret)
2330 goto error_free;
2331 }
2332
2333 qp_attr->qp_state = IB_QPS_RTS;
2334 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2335 if (ret)
2336 goto error_free;
2337
c9b03c1a
BVA
2338 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2339
509c07bc 2340 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2341 if (ret)
2342 goto error_free;
2343
2344 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2345
2346error_free:
2347 kfree(qp_attr);
2348
2349error:
509c07bc 2350 ch->status = ret;
961e0be8
DD
2351}
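/*
 * Summary of srp_cm_rep_handler() above: record max_ti_iu_len and the
 * initial request limit from the SRP_LOGIN_RSP, allocate the RX/TX rings
 * on the first connect, transition the QP to RTR, post one receive per
 * queue entry, transition the QP to RTS (at which point rq_tmo_jiffies is
 * derived), and finally acknowledge the connection with a CM RTU.  Any
 * failure is recorded in ch->status.
 */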
2352
aef9ec39
RD
2353static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2354 struct ib_cm_event *event,
509c07bc 2355 struct srp_rdma_ch *ch)
aef9ec39 2356{
509c07bc 2357 struct srp_target_port *target = ch->target;
7aa54bd7 2358 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2359 struct ib_class_port_info *cpi;
2360 int opcode;
2361
2362 switch (event->param.rej_rcvd.reason) {
2363 case IB_CM_REJ_PORT_CM_REDIRECT:
2364 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2365 ch->path.dlid = cpi->redirect_lid;
2366 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2367 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2368 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2369
509c07bc 2370 ch->status = ch->path.dlid ?
aef9ec39
RD
2371 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2372 break;
2373
2374 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2375 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2376 /*
2377 * Topspin/Cisco SRP gateways incorrectly send
2378 * reject reason code 25 when they mean 24
2379 * (port redirect).
2380 */
509c07bc 2381 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2382 event->param.rej_rcvd.ari, 16);
2383
7aa54bd7
DD
2384 shost_printk(KERN_DEBUG, shost,
2385 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2386 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2387 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2388
509c07bc 2389 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2390 } else {
7aa54bd7
DD
2391 shost_printk(KERN_WARNING, shost,
2392 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2393 ch->status = -ECONNRESET;
aef9ec39
RD
2394 }
2395 break;
2396
2397 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2398 shost_printk(KERN_WARNING, shost,
2399 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2400 ch->status = -ECONNRESET;
aef9ec39
RD
2401 break;
2402
2403 case IB_CM_REJ_CONSUMER_DEFINED:
2404 opcode = *(u8 *) event->private_data;
2405 if (opcode == SRP_LOGIN_REJ) {
2406 struct srp_login_rej *rej = event->private_data;
2407 u32 reason = be32_to_cpu(rej->reason);
2408
2409 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2410 shost_printk(KERN_WARNING, shost,
2411 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2412 else
e7ffde01
BVA
2413 shost_printk(KERN_WARNING, shost, PFX
2414 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2415 target->sgid.raw,
2416 target->orig_dgid.raw, reason);
aef9ec39 2417 } else
7aa54bd7
DD
2418 shost_printk(KERN_WARNING, shost,
2419 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2420 " opcode 0x%02x\n", opcode);
509c07bc 2421 ch->status = -ECONNRESET;
aef9ec39
RD
2422 break;
2423
9fe4bcf4
DD
2424 case IB_CM_REJ_STALE_CONN:
2425 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2426 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2427 break;
2428
aef9ec39 2429 default:
7aa54bd7
DD
2430 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2431 event->param.rej_rcvd.reason);
509c07bc 2432 ch->status = -ECONNRESET;
aef9ec39
RD
2433 }
2434}
2435
2436static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2437{
509c07bc
BVA
2438 struct srp_rdma_ch *ch = cm_id->context;
2439 struct srp_target_port *target = ch->target;
aef9ec39 2440 int comp = 0;
aef9ec39
RD
2441
2442 switch (event->event) {
2443 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2444 shost_printk(KERN_DEBUG, target->scsi_host,
2445 PFX "Sending CM REQ failed\n");
aef9ec39 2446 comp = 1;
509c07bc 2447 ch->status = -ECONNRESET;
aef9ec39
RD
2448 break;
2449
2450 case IB_CM_REP_RECEIVED:
2451 comp = 1;
509c07bc 2452 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2453 break;
2454
2455 case IB_CM_REJ_RECEIVED:
7aa54bd7 2456 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2457 comp = 1;
2458
509c07bc 2459 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2460 break;
2461
b7ac4ab4 2462 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2463 shost_printk(KERN_WARNING, target->scsi_host,
2464 PFX "DREQ received - connection closed\n");
c014c8cd 2465 ch->connected = false;
b7ac4ab4 2466 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2467 shost_printk(KERN_ERR, target->scsi_host,
2468 PFX "Sending CM DREP failed\n");
c1120f89 2469 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2470 break;
2471
2472 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2473 shost_printk(KERN_ERR, target->scsi_host,
2474 PFX "connection closed\n");
ac72d766 2475 comp = 1;
aef9ec39 2476
509c07bc 2477 ch->status = 0;
aef9ec39
RD
2478 break;
2479
b7ac4ab4
IR
2480 case IB_CM_MRA_RECEIVED:
2481 case IB_CM_DREQ_ERROR:
2482 case IB_CM_DREP_RECEIVED:
2483 break;
2484
aef9ec39 2485 default:
7aa54bd7
DD
2486 shost_printk(KERN_WARNING, target->scsi_host,
2487 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2488 break;
2489 }
2490
2491 if (comp)
509c07bc 2492 complete(&ch->done);
aef9ec39 2493
aef9ec39
RD
2494 return 0;
2495}
2496
71444b97
JW
2497/**
2498 * srp_change_queue_depth - setting device queue depth
2499 * @sdev: scsi device struct
2500 * @qdepth: requested queue depth
71444b97
JW
2501 *
2502 * Returns queue depth.
2503 */
2504static int
db5ed4df 2505srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2506{
c40ecc12 2507 if (!sdev->tagged_supported)
1e6f2416 2508 qdepth = 1;
db5ed4df 2509 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2510}
2511
985aa495
BVA
2512static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2513 u8 func)
aef9ec39 2514{
509c07bc 2515 struct srp_target_port *target = ch->target;
a95cadb9 2516 struct srp_rport *rport = target->rport;
19081f31 2517 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2518 struct srp_iu *iu;
2519 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2520
c014c8cd 2521 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2522 return -1;
2523
509c07bc 2524 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2525
a95cadb9 2526 /*
509c07bc 2527 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2528 * invoked while a task management function is being sent.
2529 */
2530 mutex_lock(&rport->mutex);
509c07bc
BVA
2531 spin_lock_irq(&ch->lock);
2532 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2533 spin_unlock_irq(&ch->lock);
76c75b25 2534
a95cadb9
BVA
2535 if (!iu) {
2536 mutex_unlock(&rport->mutex);
2537
76c75b25 2538 return -1;
a95cadb9 2539 }
aef9ec39 2540
19081f31
DD
2541 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2542 DMA_TO_DEVICE);
aef9ec39
RD
2543 tsk_mgmt = iu->buf;
2544 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2545
2546 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2547 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2548 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2549 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2550 tsk_mgmt->task_tag = req_tag;
aef9ec39 2551
19081f31
DD
2552 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2553 DMA_TO_DEVICE);
509c07bc
BVA
2554 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2555 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2556 mutex_unlock(&rport->mutex);
2557
76c75b25
BVA
2558 return -1;
2559 }
a95cadb9 2560 mutex_unlock(&rport->mutex);
d945e1df 2561
509c07bc 2562 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2563 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2564 return -1;
aef9ec39 2565
d945e1df 2566 return 0;
d945e1df
RD
2567}
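/*
 * Note on the tag above: task management IUs are tagged with
 * req_tag | SRP_TAG_TSK_MGMT, which is what lets srp_process_rsp()
 * distinguish a task management response (completing ch->tsk_mgmt_done)
 * from a normal command response looked up via scsi_host_find_tag().
 */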
2568
aef9ec39
RD
2569static int srp_abort(struct scsi_cmnd *scmnd)
2570{
d945e1df 2571 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2572 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2573 u32 tag;
d92c0da7 2574 u16 ch_idx;
509c07bc 2575 struct srp_rdma_ch *ch;
086f44f5 2576 int ret;
d945e1df 2577
7aa54bd7 2578 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2579
d92c0da7 2580 if (!req)
99b6697a 2581 return SUCCESS;
77f2c1a4 2582 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2583 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2584 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2585 return SUCCESS;
2586 ch = &target->ch[ch_idx];
2587 if (!srp_claim_req(ch, req, NULL, scmnd))
2588 return SUCCESS;
2589 shost_printk(KERN_ERR, target->scsi_host,
2590 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2591 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2592 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2593 ret = SUCCESS;
ed9b2264 2594 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2595 ret = FAST_IO_FAIL;
086f44f5
BVA
2596 else
2597 ret = FAILED;
509c07bc 2598 srp_free_req(ch, req, scmnd, 0);
22032991 2599 scmnd->result = DID_ABORT << 16;
d8536670 2600 scmnd->scsi_done(scmnd);
d945e1df 2601
086f44f5 2602 return ret;
aef9ec39
RD
2603}
2604
2605static int srp_reset_device(struct scsi_cmnd *scmnd)
2606{
d945e1df 2607 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2608 struct srp_rdma_ch *ch;
536ae14e 2609 int i, j;
d945e1df 2610
7aa54bd7 2611 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2612
d92c0da7 2613 ch = &target->ch[0];
509c07bc 2614 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2615 SRP_TSK_LUN_RESET))
d945e1df 2616 return FAILED;
509c07bc 2617 if (ch->tsk_mgmt_status)
d945e1df
RD
2618 return FAILED;
2619
d92c0da7
BVA
2620 for (i = 0; i < target->ch_count; i++) {
2621 ch = &target->ch[i];
 2622 for (j = 0; j < target->req_ring_size; ++j) {
 2623 struct srp_request *req = &ch->req_ring[j];
509c07bc 2624
d92c0da7
BVA
2625 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2626 }
536ae14e 2627 }
d945e1df 2628
d945e1df 2629 return SUCCESS;
aef9ec39
RD
2630}
2631
2632static int srp_reset_host(struct scsi_cmnd *scmnd)
2633{
2634 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2635
7aa54bd7 2636 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2637
ed9b2264 2638 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2639}
2640
509c5f33
BVA
2641static int srp_slave_alloc(struct scsi_device *sdev)
2642{
2643 struct Scsi_Host *shost = sdev->host;
2644 struct srp_target_port *target = host_to_target(shost);
2645 struct srp_device *srp_dev = target->srp_host->srp_dev;
2646 struct ib_device *ibdev = srp_dev->dev;
2647
2648 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
2649 blk_queue_virt_boundary(sdev->request_queue,
2650 ~srp_dev->mr_page_mask);
2651
2652 return 0;
2653}
2654
c9b03c1a
BVA
2655static int srp_slave_configure(struct scsi_device *sdev)
2656{
2657 struct Scsi_Host *shost = sdev->host;
2658 struct srp_target_port *target = host_to_target(shost);
2659 struct request_queue *q = sdev->request_queue;
2660 unsigned long timeout;
2661
2662 if (sdev->type == TYPE_DISK) {
2663 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2664 blk_queue_rq_timeout(q, timeout);
2665 }
2666
2667 return 0;
2668}
2669
ee959b00
TJ
2670static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2671 char *buf)
6ecb0c84 2672{
ee959b00 2673 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2674
45c37cad 2675 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2676}
2677
ee959b00
TJ
2678static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2679 char *buf)
6ecb0c84 2680{
ee959b00 2681 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2682
45c37cad 2683 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2684}
2685
ee959b00
TJ
2686static ssize_t show_service_id(struct device *dev,
2687 struct device_attribute *attr, char *buf)
6ecb0c84 2688{
ee959b00 2689 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2690
45c37cad 2691 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2692}
2693
ee959b00
TJ
2694static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2695 char *buf)
6ecb0c84 2696{
ee959b00 2697 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2698
747fe000 2699 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2700}
2701
848b3082
BVA
2702static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2703 char *buf)
2704{
2705 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2706
747fe000 2707 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2708}
2709
ee959b00
TJ
2710static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2711 char *buf)
6ecb0c84 2712{
ee959b00 2713 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2714 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2715
509c07bc 2716 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2717}
2718
ee959b00
TJ
2719static ssize_t show_orig_dgid(struct device *dev,
2720 struct device_attribute *attr, char *buf)
3633b3d0 2721{
ee959b00 2722 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2723
747fe000 2724 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2725}
2726
89de7486
BVA
2727static ssize_t show_req_lim(struct device *dev,
2728 struct device_attribute *attr, char *buf)
2729{
2730 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2731 struct srp_rdma_ch *ch;
2732 int i, req_lim = INT_MAX;
89de7486 2733
d92c0da7
BVA
2734 for (i = 0; i < target->ch_count; i++) {
2735 ch = &target->ch[i];
2736 req_lim = min(req_lim, ch->req_lim);
2737 }
2738 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2739}
2740
ee959b00
TJ
2741static ssize_t show_zero_req_lim(struct device *dev,
2742 struct device_attribute *attr, char *buf)
6bfa24fa 2743{
ee959b00 2744 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2745
6bfa24fa
RD
2746 return sprintf(buf, "%d\n", target->zero_req_lim);
2747}
2748
ee959b00
TJ
2749static ssize_t show_local_ib_port(struct device *dev,
2750 struct device_attribute *attr, char *buf)
ded7f1a1 2751{
ee959b00 2752 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2753
2754 return sprintf(buf, "%d\n", target->srp_host->port);
2755}
2756
ee959b00
TJ
2757static ssize_t show_local_ib_device(struct device *dev,
2758 struct device_attribute *attr, char *buf)
ded7f1a1 2759{
ee959b00 2760 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2761
05321937 2762 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2763}
2764
d92c0da7
BVA
2765static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2766 char *buf)
2767{
2768 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2769
2770 return sprintf(buf, "%d\n", target->ch_count);
2771}
2772
4b5e5f41
BVA
2773static ssize_t show_comp_vector(struct device *dev,
2774 struct device_attribute *attr, char *buf)
2775{
2776 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2777
2778 return sprintf(buf, "%d\n", target->comp_vector);
2779}
2780
7bb312e4
VP
2781static ssize_t show_tl_retry_count(struct device *dev,
2782 struct device_attribute *attr, char *buf)
2783{
2784 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2785
2786 return sprintf(buf, "%d\n", target->tl_retry_count);
2787}
2788
49248644
DD
2789static ssize_t show_cmd_sg_entries(struct device *dev,
2790 struct device_attribute *attr, char *buf)
2791{
2792 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2793
2794 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2795}
2796
c07d424d
DD
2797static ssize_t show_allow_ext_sg(struct device *dev,
2798 struct device_attribute *attr, char *buf)
2799{
2800 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2801
2802 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2803}
2804
ee959b00
TJ
2805static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2806static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2807static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2808static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2809static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2810static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2811static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2812static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2813static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2814static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2815static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2816static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2817static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2818static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2819static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2820static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2821
2822static struct device_attribute *srp_host_attrs[] = {
2823 &dev_attr_id_ext,
2824 &dev_attr_ioc_guid,
2825 &dev_attr_service_id,
2826 &dev_attr_pkey,
848b3082 2827 &dev_attr_sgid,
ee959b00
TJ
2828 &dev_attr_dgid,
2829 &dev_attr_orig_dgid,
89de7486 2830 &dev_attr_req_lim,
ee959b00
TJ
2831 &dev_attr_zero_req_lim,
2832 &dev_attr_local_ib_port,
2833 &dev_attr_local_ib_device,
d92c0da7 2834 &dev_attr_ch_count,
4b5e5f41 2835 &dev_attr_comp_vector,
7bb312e4 2836 &dev_attr_tl_retry_count,
49248644 2837 &dev_attr_cmd_sg_entries,
c07d424d 2838 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2839 NULL
2840};
2841
aef9ec39
RD
2842static struct scsi_host_template srp_template = {
2843 .module = THIS_MODULE,
b7f008fd
RD
2844 .name = "InfiniBand SRP initiator",
2845 .proc_name = DRV_NAME,
509c5f33 2846 .slave_alloc = srp_slave_alloc,
c9b03c1a 2847 .slave_configure = srp_slave_configure,
aef9ec39
RD
2848 .info = srp_target_info,
2849 .queuecommand = srp_queuecommand,
71444b97 2850 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2851 .eh_abort_handler = srp_abort,
2852 .eh_device_reset_handler = srp_reset_device,
2853 .eh_host_reset_handler = srp_reset_host,
2742c1da 2854 .skip_settle_delay = true,
49248644 2855 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2856 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2857 .this_id = -1,
4d73f95f 2858 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2859 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4 2860 .shost_attrs = srp_host_attrs,
c40ecc12 2861 .track_queue_depth = 1,
aef9ec39
RD
2862};
2863
34aa654e
BVA
2864static int srp_sdev_count(struct Scsi_Host *host)
2865{
2866 struct scsi_device *sdev;
2867 int c = 0;
2868
2869 shost_for_each_device(sdev, host)
2870 c++;
2871
2872 return c;
2873}
2874
bc44bd1d
BVA
2875/*
2876 * Return values:
2877 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2878 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2879 * removal has been scheduled.
2880 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2881 */
aef9ec39
RD
2882static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2883{
3236822b
FT
2884 struct srp_rport_identifiers ids;
2885 struct srp_rport *rport;
2886
34aa654e 2887 target->state = SRP_TARGET_SCANNING;
aef9ec39 2888 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2889 be64_to_cpu(target->id_ext));
aef9ec39 2890
05321937 2891 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2892 return -ENODEV;
2893
3236822b
FT
2894 memcpy(ids.port_id, &target->id_ext, 8);
2895 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2896 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2897 rport = srp_rport_add(target->scsi_host, &ids);
2898 if (IS_ERR(rport)) {
2899 scsi_remove_host(target->scsi_host);
2900 return PTR_ERR(rport);
2901 }
2902
dc1bdbd9 2903 rport->lld_data = target;
9dd69a60 2904 target->rport = rport;
dc1bdbd9 2905
b3589fd4 2906 spin_lock(&host->target_lock);
aef9ec39 2907 list_add_tail(&target->list, &host->target_list);
b3589fd4 2908 spin_unlock(&host->target_lock);
aef9ec39 2909
aef9ec39 2910 scsi_scan_target(&target->scsi_host->shost_gendev,
1d645088 2911 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
aef9ec39 2912
c014c8cd
BVA
2913 if (srp_connected_ch(target) < target->ch_count ||
2914 target->qp_in_error) {
34aa654e
BVA
2915 shost_printk(KERN_INFO, target->scsi_host,
2916 PFX "SCSI scan failed - removing SCSI host\n");
2917 srp_queue_remove_work(target);
2918 goto out;
2919 }
2920
cf1acab7 2921 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
34aa654e
BVA
2922 dev_name(&target->scsi_host->shost_gendev),
2923 srp_sdev_count(target->scsi_host));
2924
2925 spin_lock_irq(&target->lock);
2926 if (target->state == SRP_TARGET_SCANNING)
2927 target->state = SRP_TARGET_LIVE;
2928 spin_unlock_irq(&target->lock);
2929
2930out:
aef9ec39
RD
2931 return 0;
2932}
2933
ee959b00 2934static void srp_release_dev(struct device *dev)
aef9ec39
RD
2935{
2936 struct srp_host *host =
ee959b00 2937 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2938
2939 complete(&host->released);
2940}
2941
2942static struct class srp_class = {
2943 .name = "infiniband_srp",
ee959b00 2944 .dev_release = srp_release_dev
aef9ec39
RD
2945};
2946
96fc248a
BVA
2947/**
2948 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2949 * @host: SRP host.
2950 * @target: SRP target port.
96fc248a
BVA
2951 */
2952static bool srp_conn_unique(struct srp_host *host,
2953 struct srp_target_port *target)
2954{
2955 struct srp_target_port *t;
2956 bool ret = false;
2957
2958 if (target->state == SRP_TARGET_REMOVED)
2959 goto out;
2960
2961 ret = true;
2962
2963 spin_lock(&host->target_lock);
2964 list_for_each_entry(t, &host->target_list, list) {
2965 if (t != target &&
2966 target->id_ext == t->id_ext &&
2967 target->ioc_guid == t->ioc_guid &&
2968 target->initiator_ext == t->initiator_ext) {
2969 ret = false;
2970 break;
2971 }
2972 }
2973 spin_unlock(&host->target_lock);
2974
2975out:
2976 return ret;
2977}
2978
aef9ec39
RD
2979/*
2980 * Target ports are added by writing
2981 *
2982 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2983 * pkey=<P_Key>,service_id=<service ID>
2984 *
2985 * to the add_target sysfs attribute.
2986 */
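/*
 * Example add_target string (all values below are made-up placeholders):
 *
 *   id_ext=200100e08b000000,ioc_guid=0002c90300a0b0c0,
 *   dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,
 *   service_id=0002c90300a0b0c0,queue_size=128
 *
 * The first five parameters form the mandatory set (SRP_OPT_ALL in the
 * enum below); the remaining tokens in srp_opt_tokens[] are optional.
 */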
2987enum {
2988 SRP_OPT_ERR = 0,
2989 SRP_OPT_ID_EXT = 1 << 0,
2990 SRP_OPT_IOC_GUID = 1 << 1,
2991 SRP_OPT_DGID = 1 << 2,
2992 SRP_OPT_PKEY = 1 << 3,
2993 SRP_OPT_SERVICE_ID = 1 << 4,
2994 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2995 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2996 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2997 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2998 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2999 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3000 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 3001 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 3002 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 3003 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
3004 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3005 SRP_OPT_IOC_GUID |
3006 SRP_OPT_DGID |
3007 SRP_OPT_PKEY |
3008 SRP_OPT_SERVICE_ID),
3009};
3010
a447c093 3011static const match_table_t srp_opt_tokens = {
52fb2b50
VP
3012 { SRP_OPT_ID_EXT, "id_ext=%s" },
3013 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3014 { SRP_OPT_DGID, "dgid=%s" },
3015 { SRP_OPT_PKEY, "pkey=%x" },
3016 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3017 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3018 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 3019 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 3020 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 3021 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
3022 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3023 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 3024 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 3025 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 3026 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 3027 { SRP_OPT_ERR, NULL }
aef9ec39
RD
3028};
3029
3030static int srp_parse_options(const char *buf, struct srp_target_port *target)
3031{
3032 char *options, *sep_opt;
3033 char *p;
3034 char dgid[3];
3035 substring_t args[MAX_OPT_ARGS];
3036 int opt_mask = 0;
3037 int token;
3038 int ret = -EINVAL;
3039 int i;
3040
3041 options = kstrdup(buf, GFP_KERNEL);
3042 if (!options)
3043 return -ENOMEM;
3044
3045 sep_opt = options;
7dcf9c19 3046 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
3047 if (!*p)
3048 continue;
3049
3050 token = match_token(p, srp_opt_tokens, args);
3051 opt_mask |= token;
3052
3053 switch (token) {
3054 case SRP_OPT_ID_EXT:
3055 p = match_strdup(args);
a20f3a6d
IR
3056 if (!p) {
3057 ret = -ENOMEM;
3058 goto out;
3059 }
aef9ec39
RD
3060 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3061 kfree(p);
3062 break;
3063
3064 case SRP_OPT_IOC_GUID:
3065 p = match_strdup(args);
a20f3a6d
IR
3066 if (!p) {
3067 ret = -ENOMEM;
3068 goto out;
3069 }
aef9ec39
RD
3070 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3071 kfree(p);
3072 break;
3073
3074 case SRP_OPT_DGID:
3075 p = match_strdup(args);
a20f3a6d
IR
3076 if (!p) {
3077 ret = -ENOMEM;
3078 goto out;
3079 }
aef9ec39 3080 if (strlen(p) != 32) {
e0bda7d8 3081 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3082 kfree(p);
aef9ec39
RD
3083 goto out;
3084 }
3085
3086 for (i = 0; i < 16; ++i) {
747fe000
BVA
3087 strlcpy(dgid, p + i * 2, sizeof(dgid));
3088 if (sscanf(dgid, "%hhx",
3089 &target->orig_dgid.raw[i]) < 1) {
3090 ret = -EINVAL;
3091 kfree(p);
3092 goto out;
3093 }
aef9ec39 3094 }
bf17c1c7 3095 kfree(p);
aef9ec39
RD
3096 break;
3097
3098 case SRP_OPT_PKEY:
3099 if (match_hex(args, &token)) {
e0bda7d8 3100 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3101 goto out;
3102 }
747fe000 3103 target->pkey = cpu_to_be16(token);
aef9ec39
RD
3104 break;
3105
3106 case SRP_OPT_SERVICE_ID:
3107 p = match_strdup(args);
a20f3a6d
IR
3108 if (!p) {
3109 ret = -ENOMEM;
3110 goto out;
3111 }
aef9ec39
RD
3112 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3113 kfree(p);
3114 break;
3115
3116 case SRP_OPT_MAX_SECT:
3117 if (match_int(args, &token)) {
e0bda7d8 3118 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3119 goto out;
3120 }
3121 target->scsi_host->max_sectors = token;
3122 break;
3123
4d73f95f
BVA
3124 case SRP_OPT_QUEUE_SIZE:
3125 if (match_int(args, &token) || token < 1) {
3126 pr_warn("bad queue_size parameter '%s'\n", p);
3127 goto out;
3128 }
3129 target->scsi_host->can_queue = token;
3130 target->queue_size = token + SRP_RSP_SQ_SIZE +
3131 SRP_TSK_MGMT_SQ_SIZE;
3132 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3133 target->scsi_host->cmd_per_lun = token;
3134 break;
3135
52fb2b50 3136 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3137 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3138 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3139 p);
52fb2b50
VP
3140 goto out;
3141 }
4d73f95f 3142 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3143 break;
3144
0c0450db
R
3145 case SRP_OPT_IO_CLASS:
3146 if (match_hex(args, &token)) {
e0bda7d8 3147 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3148 goto out;
3149 }
3150 if (token != SRP_REV10_IB_IO_CLASS &&
3151 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3152 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3153 token, SRP_REV10_IB_IO_CLASS,
3154 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3155 goto out;
3156 }
3157 target->io_class = token;
3158 break;
3159
01cb9bcb
IR
3160 case SRP_OPT_INITIATOR_EXT:
3161 p = match_strdup(args);
a20f3a6d
IR
3162 if (!p) {
3163 ret = -ENOMEM;
3164 goto out;
3165 }
01cb9bcb
IR
3166 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3167 kfree(p);
3168 break;
3169
49248644
DD
3170 case SRP_OPT_CMD_SG_ENTRIES:
3171 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3172 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3173 p);
49248644
DD
3174 goto out;
3175 }
3176 target->cmd_sg_cnt = token;
3177 break;
3178
c07d424d
DD
3179 case SRP_OPT_ALLOW_EXT_SG:
3180 if (match_int(args, &token)) {
e0bda7d8 3181 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3182 goto out;
3183 }
3184 target->allow_ext_sg = !!token;
3185 break;
3186
3187 case SRP_OPT_SG_TABLESIZE:
3188 if (match_int(args, &token) || token < 1 ||
65e8617f 3189 token > SG_MAX_SEGMENTS) {
e0bda7d8
BVA
3190 pr_warn("bad max sg_tablesize parameter '%s'\n",
3191 p);
c07d424d
DD
3192 goto out;
3193 }
3194 target->sg_tablesize = token;
3195 break;
3196
4b5e5f41
BVA
3197 case SRP_OPT_COMP_VECTOR:
3198 if (match_int(args, &token) || token < 0) {
3199 pr_warn("bad comp_vector parameter '%s'\n", p);
3200 goto out;
3201 }
3202 target->comp_vector = token;
3203 break;
3204
7bb312e4
VP
3205 case SRP_OPT_TL_RETRY_COUNT:
3206 if (match_int(args, &token) || token < 2 || token > 7) {
3207 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3208 p);
3209 goto out;
3210 }
3211 target->tl_retry_count = token;
3212 break;
3213
aef9ec39 3214 default:
e0bda7d8
BVA
3215 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3216 p);
aef9ec39
RD
3217 goto out;
3218 }
3219 }
3220
3221 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3222 ret = 0;
3223 else
3224 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3225 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3226 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3227 pr_warn("target creation request is missing parameter '%s'\n",
3228 srp_opt_tokens[i].pattern);
aef9ec39 3229
4d73f95f
BVA
3230 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3231 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3232 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3233 target->scsi_host->cmd_per_lun,
3234 target->scsi_host->can_queue);
3235
aef9ec39
RD
3236out:
3237 kfree(options);
3238 return ret;
3239}
3240
ee959b00
TJ
3241static ssize_t srp_create_target(struct device *dev,
3242 struct device_attribute *attr,
aef9ec39
RD
3243 const char *buf, size_t count)
3244{
3245 struct srp_host *host =
ee959b00 3246 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3247 struct Scsi_Host *target_host;
3248 struct srp_target_port *target;
509c07bc 3249 struct srp_rdma_ch *ch;
d1b4289e
BVA
3250 struct srp_device *srp_dev = host->srp_dev;
3251 struct ib_device *ibdev = srp_dev->dev;
d92c0da7 3252 int ret, node_idx, node, cpu, i;
509c5f33 3253 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
d92c0da7 3254 bool multich = false;
aef9ec39
RD
3255
3256 target_host = scsi_host_alloc(&srp_template,
3257 sizeof (struct srp_target_port));
3258 if (!target_host)
3259 return -ENOMEM;
3260
49248644 3261 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3262 target_host->max_channel = 0;
3263 target_host->max_id = 1;
985aa495 3264 target_host->max_lun = -1LL;
3c8edf0e 3265 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3266
aef9ec39 3267 target = host_to_target(target_host);
aef9ec39 3268
49248644
DD
3269 target->io_class = SRP_REV16A_IB_IO_CLASS;
3270 target->scsi_host = target_host;
3271 target->srp_host = host;
e6bf5f48 3272 target->lkey = host->srp_dev->pd->local_dma_lkey;
03f6fb93 3273 target->global_mr = host->srp_dev->global_mr;
49248644 3274 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3275 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3276 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3277 target->tl_retry_count = 7;
4d73f95f 3278 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3279
34aa654e
BVA
3280 /*
 3281 * Prevent the SCSI host from being removed by srp_remove_target()
 3282 * before this function returns.
3283 */
3284 scsi_host_get(target->scsi_host);
3285
2d7091bc
BVA
3286 mutex_lock(&host->add_target_mutex);
3287
aef9ec39
RD
3288 ret = srp_parse_options(buf, target);
3289 if (ret)
fb49c8bb 3290 goto out;
aef9ec39 3291
4d73f95f
BVA
3292 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3293
96fc248a
BVA
3294 if (!srp_conn_unique(target->srp_host, target)) {
3295 shost_printk(KERN_INFO, target->scsi_host,
3296 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3297 be64_to_cpu(target->id_ext),
3298 be64_to_cpu(target->ioc_guid),
3299 be64_to_cpu(target->initiator_ext));
3300 ret = -EEXIST;
fb49c8bb 3301 goto out;
96fc248a
BVA
3302 }
3303
5cfb1782 3304 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3305 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3306 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3307 target->sg_tablesize = target->cmd_sg_cnt;
3308 }
3309
509c5f33
BVA
3310 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3311 /*
3312 * FR and FMR can only map one HCA page per entry. If the
3313 * start address is not aligned on a HCA page boundary two
3314 * entries will be used for the head and the tail although
3315 * these two entries combined contain at most one HCA page of
3316 * data. Hence the "+ 1" in the calculation below.
3317 *
3318 * The indirect data buffer descriptor is contiguous so the
3319 * memory for that buffer will only be registered if
3320 * register_always is true. Hence add one to mr_per_cmd if
3321 * register_always has been set.
3322 */
3323 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3324 (ilog2(srp_dev->mr_page_size) - 9);
3325 mr_per_cmd = register_always +
3326 (target->scsi_host->max_sectors + 1 +
3327 max_sectors_per_mr - 1) / max_sectors_per_mr;
3328 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3329 target->scsi_host->max_sectors,
3330 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3331 max_sectors_per_mr, mr_per_cmd);
3332 }
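	/*
	 * Worked example with assumed values (4 KiB HCA pages,
	 * max_pages_per_mr = 512, max_sectors = 1024, register_always set):
	 * max_sectors_per_mr = 512 << (12 - 9) = 4096 sectors (2 MiB), so
	 * mr_per_cmd = 1 + (1024 + 1 + 4095) / 4096 = 1 + 1 = 2 memory
	 * registrations per SCSI command (integer division).
	 */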
3333
c07d424d 3334 target_host->sg_tablesize = target->sg_tablesize;
509c5f33
BVA
3335 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3336 target->mr_per_cmd = mr_per_cmd;
c07d424d
DD
3337 target->indirect_size = target->sg_tablesize *
3338 sizeof (struct srp_direct_buf);
49248644
DD
3339 target->max_iu_len = sizeof (struct srp_cmd) +
3340 sizeof (struct srp_indirect_buf) +
3341 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3342
c1120f89 3343 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3344 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3345 spin_lock_init(&target->lock);
55ee3ab2 3346 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3347 if (ret)
fb49c8bb 3348 goto out;
aef9ec39 3349
d92c0da7
BVA
3350 ret = -ENOMEM;
3351 target->ch_count = max_t(unsigned, num_online_nodes(),
3352 min(ch_count ? :
3353 min(4 * num_online_nodes(),
3354 ibdev->num_comp_vectors),
3355 num_online_cpus()));
3356 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3357 GFP_KERNEL);
3358 if (!target->ch)
fb49c8bb 3359 goto out;
aef9ec39 3360
d92c0da7
BVA
3361 node_idx = 0;
3362 for_each_online_node(node) {
3363 const int ch_start = (node_idx * target->ch_count /
3364 num_online_nodes());
3365 const int ch_end = ((node_idx + 1) * target->ch_count /
3366 num_online_nodes());
3367 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3368 num_online_nodes() + target->comp_vector)
3369 % ibdev->num_comp_vectors;
3370 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3371 num_online_nodes() + target->comp_vector)
3372 % ibdev->num_comp_vectors;
3373 int cpu_idx = 0;
3374
3375 for_each_online_cpu(cpu) {
3376 if (cpu_to_node(cpu) != node)
3377 continue;
3378 if (ch_start + cpu_idx >= ch_end)
3379 continue;
3380 ch = &target->ch[ch_start + cpu_idx];
3381 ch->target = target;
3382 ch->comp_vector = cv_start == cv_end ? cv_start :
3383 cv_start + cpu_idx % (cv_end - cv_start);
3384 spin_lock_init(&ch->lock);
3385 INIT_LIST_HEAD(&ch->free_tx);
3386 ret = srp_new_cm_id(ch);
3387 if (ret)
3388 goto err_disconnect;
aef9ec39 3389
d92c0da7
BVA
3390 ret = srp_create_ch_ib(ch);
3391 if (ret)
3392 goto err_disconnect;
3393
3394 ret = srp_alloc_req_data(ch);
3395 if (ret)
3396 goto err_disconnect;
3397
3398 ret = srp_connect_ch(ch, multich);
3399 if (ret) {
3400 shost_printk(KERN_ERR, target->scsi_host,
3401 PFX "Connection %d/%d failed\n",
3402 ch_start + cpu_idx,
3403 target->ch_count);
3404 if (node_idx == 0 && cpu_idx == 0) {
3405 goto err_disconnect;
3406 } else {
3407 srp_free_ch_ib(target, ch);
3408 srp_free_req_data(target, ch);
3409 target->ch_count = ch - target->ch;
c257ea6f 3410 goto connected;
d92c0da7
BVA
3411 }
3412 }
3413
3414 multich = true;
3415 cpu_idx++;
3416 }
3417 node_idx++;
aef9ec39
RD
3418 }
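	/*
	 * If a channel other than the first one failed to connect,
	 * ch_count has been reduced above and the target simply uses
	 * fewer channels.
	 */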
3419
c257ea6f 3420connected:
d92c0da7
BVA
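	/* With scsi-mq, expose one blk-mq hardware queue per RDMA channel. */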
3421 target->scsi_host->nr_hw_queues = target->ch_count;
3422
aef9ec39
RD
3423 ret = srp_add_target(host, target);
3424 if (ret)
3425 goto err_disconnect;
3426
34aa654e
BVA
3427 if (target->state != SRP_TARGET_REMOVED) {
3428 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3429 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3430 be64_to_cpu(target->id_ext),
3431 be64_to_cpu(target->ioc_guid),
747fe000 3432 be16_to_cpu(target->pkey),
34aa654e 3433 be64_to_cpu(target->service_id),
747fe000 3434 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3435 }
e7ffde01 3436
2d7091bc
BVA
3437 ret = count;
3438
3439out:
3440 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3441
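	/*
	 * Drop the reference taken via scsi_host_get() above; on failure
	 * also drop the initial reference so that the SCSI host is freed.
	 */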
3442 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3443 if (ret < 0)
3444 scsi_host_put(target->scsi_host);
34aa654e 3445
2d7091bc 3446 return ret;
aef9ec39
RD
3447
3448err_disconnect:
3449 srp_disconnect_target(target);
3450
d92c0da7
BVA
3451 for (i = 0; i < target->ch_count; i++) {
3452 ch = &target->ch[i];
3453 srp_free_ch_ib(target, ch);
3454 srp_free_req_data(target, ch);
3455 }
aef9ec39 3456
d92c0da7 3457 kfree(target->ch);
2d7091bc 3458 goto out;
aef9ec39
RD
3459}
3460
ee959b00 3461static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
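/*
 * Illustrative usage (all values are placeholders): userspace, typically
 * srp_daemon, writes a login string to this attribute to create a new
 * target port, e.g.
 *
 *   echo "id_ext=...,ioc_guid=...,dgid=...,pkey=...,service_id=..." > \
 *     /sys/class/infiniband_srp/srp-<hca>-<port>/add_target
 *
 * which ends up in srp_create_target() above.
 */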
aef9ec39 3462
ee959b00
TJ
3463static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3464 char *buf)
aef9ec39 3465{
ee959b00 3466 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3467
05321937 3468 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3469}
3470
ee959b00 3471static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3472
ee959b00
TJ
3473static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3474 char *buf)
aef9ec39 3475{
ee959b00 3476 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3477
3478 return sprintf(buf, "%d\n", host->port);
3479}
3480
ee959b00 3481static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3482
f5358a17 3483static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3484{
3485 struct srp_host *host;
3486
3487 host = kzalloc(sizeof *host, GFP_KERNEL);
3488 if (!host)
3489 return NULL;
3490
3491 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3492 spin_lock_init(&host->target_lock);
aef9ec39 3493 init_completion(&host->released);
2d7091bc 3494 mutex_init(&host->add_target_mutex);
05321937 3495 host->srp_dev = device;
aef9ec39
RD
3496 host->port = port;
3497
ee959b00
TJ
3498 host->dev.class = &srp_class;
3499 host->dev.parent = device->dev->dma_device;
d927e38c 3500 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3501
ee959b00 3502 if (device_register(&host->dev))
f5358a17 3503 goto free_host;
ee959b00 3504 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3505 goto err_class;
ee959b00 3506 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3507 goto err_class;
ee959b00 3508 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3509 goto err_class;
3510
3511 return host;
3512
3513err_class:
ee959b00 3514 device_unregister(&host->dev);
aef9ec39 3515
f5358a17 3516free_host:
aef9ec39
RD
3517 kfree(host);
3518
3519 return NULL;
3520}
3521
3522static void srp_add_one(struct ib_device *device)
3523{
f5358a17 3524 struct srp_device *srp_dev;
aef9ec39 3525 struct srp_host *host;
4139032b 3526 int mr_page_shift, p;
52ede08f 3527 u64 max_pages_per_mr;
aef9ec39 3528
f5358a17
RD
3529 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3530 if (!srp_dev)
4a061b28 3531 return;
f5358a17
RD
3532
3533 /*
3534 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3535 * minimum of 4096 bytes. We're unlikely to build large sglists
3536 * out of smaller entries.
f5358a17 3537 */
4a061b28 3538 mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
52ede08f
BVA
3539 srp_dev->mr_page_size = 1 << mr_page_shift;
3540 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
4a061b28 3541 max_pages_per_mr = device->attrs.max_mr_size;
52ede08f 3542 do_div(max_pages_per_mr, srp_dev->mr_page_size);
509c5f33
BVA
3543 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3544 device->attrs.max_mr_size, srp_dev->mr_page_size,
3545 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
52ede08f
BVA
3546 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3547 max_pages_per_mr);
835ee624
BVA
3548
3549 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3550 device->map_phys_fmr && device->unmap_fmr);
3551 srp_dev->has_fr = (device->attrs.device_cap_flags &
3552 IB_DEVICE_MEM_MGT_EXTENSIONS);
c222a39f 3553 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
835ee624 3554 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
c222a39f
BVA
3555 } else if (!never_register &&
3556 device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
509c5f33
BVA
3557 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3558 (!srp_dev->has_fmr || prefer_fr));
3559 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3560 }
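	/*
	 * The selection above prefers fast registration (FR) over FMR when
	 * both are supported and prefer_fr is set (the default).
	 */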
835ee624 3561
5cfb1782
BVA
3562 if (srp_dev->use_fast_reg) {
3563 srp_dev->max_pages_per_mr =
3564 min_t(u32, srp_dev->max_pages_per_mr,
4a061b28 3565 device->attrs.max_fast_reg_page_list_len);
5cfb1782 3566 }
52ede08f
BVA
3567 srp_dev->mr_max_size = srp_dev->mr_page_size *
3568 srp_dev->max_pages_per_mr;
4a061b28
OG
3569 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3570 device->name, mr_page_shift, device->attrs.max_mr_size,
3571 device->attrs.max_fast_reg_page_list_len,
52ede08f 3572 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3573
3574 INIT_LIST_HEAD(&srp_dev->dev_list);
3575
3576 srp_dev->dev = device;
3577 srp_dev->pd = ib_alloc_pd(device);
3578 if (IS_ERR(srp_dev->pd))
3579 goto free_dev;
3580
c222a39f
BVA
3581 if (never_register || !register_always ||
3582 (!srp_dev->has_fmr && !srp_dev->has_fr)) {
03f6fb93
BVA
3583 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3584 IB_ACCESS_LOCAL_WRITE |
3585 IB_ACCESS_REMOTE_READ |
3586 IB_ACCESS_REMOTE_WRITE);
3587 if (IS_ERR(srp_dev->global_mr))
3588 goto err_pd;
3589 } else {
3590 srp_dev->global_mr = NULL;
3591 }
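	/*
	 * Per the test above, a global DMA MR is only created when some
	 * transfers may bypass per-command memory registration
	 * (never_register set, register_always cleared, or neither FMR nor
	 * FR supported).
	 */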
f5358a17 3592
4139032b 3593 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3594 host = srp_add_port(srp_dev, p);
aef9ec39 3595 if (host)
f5358a17 3596 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3597 }
3598
f5358a17 3599 ib_set_client_data(device, &srp_client, srp_dev);
4a061b28 3600 return;
f5358a17
RD
3601
3602err_pd:
3603 ib_dealloc_pd(srp_dev->pd);
3604
3605free_dev:
3606 kfree(srp_dev);
aef9ec39
RD
3607}
3608
7c1eb45a 3609static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3610{
f5358a17 3611 struct srp_device *srp_dev;
aef9ec39 3612 struct srp_host *host, *tmp_host;
ef6c49d8 3613 struct srp_target_port *target;
aef9ec39 3614
7c1eb45a 3615 srp_dev = client_data;
1fe0cb84
DB
3616 if (!srp_dev)
3617 return;
aef9ec39 3618
f5358a17 3619 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3620 device_unregister(&host->dev);
aef9ec39
RD
3621 /*
3622 * Wait for the sysfs entry to go away, so that no new
3623 * target ports can be created.
3624 */
3625 wait_for_completion(&host->released);
3626
3627 /*
ef6c49d8 3628 * Remove all target ports.
aef9ec39 3629 */
b3589fd4 3630 spin_lock(&host->target_lock);
ef6c49d8
BVA
3631 list_for_each_entry(target, &host->target_list, list)
3632 srp_queue_remove_work(target);
b3589fd4 3633 spin_unlock(&host->target_lock);
aef9ec39
RD
3634
3635 /*
bcc05910 3636 * Wait for tl_err and target port removal tasks.
aef9ec39 3637 */
ef6c49d8 3638 flush_workqueue(system_long_wq);
bcc05910 3639 flush_workqueue(srp_remove_wq);
aef9ec39 3640
aef9ec39
RD
3641 kfree(host);
3642 }
3643
03f6fb93
BVA
3644 if (srp_dev->global_mr)
3645 ib_dereg_mr(srp_dev->global_mr);
f5358a17
RD
3646 ib_dealloc_pd(srp_dev->pd);
3647
3648 kfree(srp_dev);
aef9ec39
RD
3649}
3650
3236822b 3651static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3652 .has_rport_state = true,
3653 .reset_timer_if_blocked = true,
a95cadb9 3654 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3655 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3656 .dev_loss_tmo = &srp_dev_loss_tmo,
3657 .reconnect = srp_rport_reconnect,
dc1bdbd9 3658 .rport_delete = srp_rport_delete,
ed9b2264 3659 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3660};
3661
aef9ec39
RD
3662static int __init srp_init_module(void)
3663{
3664 int ret;
3665
49248644 3666 if (srp_sg_tablesize) {
e0bda7d8 3667 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3668 if (!cmd_sg_entries)
3669 cmd_sg_entries = srp_sg_tablesize;
3670 }
3671
3672 if (!cmd_sg_entries)
3673 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3674
3675 if (cmd_sg_entries > 255) {
e0bda7d8 3676 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3677 cmd_sg_entries = 255;
1e89a194
DD
3678 }
3679
c07d424d
DD
3680 if (!indirect_sg_entries)
3681 indirect_sg_entries = cmd_sg_entries;
3682 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3683 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3684 cmd_sg_entries);
c07d424d
DD
3685 indirect_sg_entries = cmd_sg_entries;
3686 }
3687
bcc05910 3688 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3689 if (!srp_remove_wq) {
3690 ret = -ENOMEM;
bcc05910
BVA
3691 goto out;
3692 }
3693
3694 ret = -ENOMEM;
3236822b
FT
3695 ib_srp_transport_template =
3696 srp_attach_transport(&ib_srp_transport_functions);
3697 if (!ib_srp_transport_template)
bcc05910 3698 goto destroy_wq;
3236822b 3699
aef9ec39
RD
3700 ret = class_register(&srp_class);
3701 if (ret) {
e0bda7d8 3702 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3703 goto release_tr;
aef9ec39
RD
3704 }
3705
c1a0b23b
MT
3706 ib_sa_register_client(&srp_sa_client);
3707
aef9ec39
RD
3708 ret = ib_register_client(&srp_client);
3709 if (ret) {
e0bda7d8 3710 pr_err("couldn't register IB client\n");
bcc05910 3711 goto unreg_sa;
aef9ec39
RD
3712 }
3713
bcc05910
BVA
3714out:
3715 return ret;
3716
3717unreg_sa:
3718 ib_sa_unregister_client(&srp_sa_client);
3719 class_unregister(&srp_class);
3720
3721release_tr:
3722 srp_release_transport(ib_srp_transport_template);
3723
3724destroy_wq:
3725 destroy_workqueue(srp_remove_wq);
3726 goto out;
aef9ec39
RD
3727}
3728
3729static void __exit srp_cleanup_module(void)
3730{
3731 ib_unregister_client(&srp_client);
c1a0b23b 3732 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3733 class_unregister(&srp_class);
3236822b 3734 srp_release_transport(ib_srp_transport_template);
bcc05910 3735 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3736}
3737
3738module_init(srp_init_module);
3739module_exit(srp_cleanup_module);