/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

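/*
 * The three timeout parameters above are also writable at runtime through
 * sysfs. A usage sketch (paths assume the driver is loaded as module
 * "ib_srp"; srp_tmo_get() and srp_tmo_set() below implement the "off"
 * syntax and reject combinations that srp_tmo_valid() disallows):
 *
 *   echo 20  > /sys/module/ib_srp/parameters/reconnect_delay
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 */
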
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

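/**
 * srp_alloc_iu() - allocate and DMA-map an information unit
 * @host:      SRP host that owns the information unit.
 * @size:      Size of the IU buffer in bytes.
 * @gfp_mask:  Allocation flags.
 * @direction: DMA direction of the buffer.
 *
 * Returns a pointer to the new IU, or NULL if allocating or DMA-mapping
 * the buffer failed.
 */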
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

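/**
 * srp_init_qp() - bring a freshly created QP into the INIT state
 * @target: SRP target port.
 * @qp:     Queue pair to initialize.
 *
 * Looks up the P_Key index matching target->path.pkey and enables remote
 * read and write access for the queue pair.
 */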
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

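/**
 * srp_new_cm_id() - create a new IB CM ID for a target port
 * @target: SRP target port.
 *
 * On success the new CM ID replaces target->cm_id and the old CM ID, if
 * any, is destroyed.
 */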
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

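/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for a target port
 * @target: SRP target port.
 *
 * The pool is sized after the SCSI host queue depth, with the dirty
 * watermark at a quarter of the pool size.
 */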
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

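/**
 * srp_create_target_ib() - create CQs, a QP and memory registration pools
 * @target: SRP target port.
 *
 * Allocates new receive and send completion queues and a new RC queue pair,
 * plus an FR or FMR pool if memory registration is used. Only after
 * everything has been allocated successfully are the old resources, if any,
 * destroyed and replaced, so a failure leaves the previous state intact.
 */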
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target,
			       m * target->queue_size, target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (target->fr_pool)
			srp_destroy_fr_pool(target->fr_pool);
		target->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (target->fmr_pool)
			ib_destroy_fmr_pool(target->fmr_pool);
		target->fmr_pool = fmr_pool;
	}

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (target->cm_id) {
		ib_destroy_cm_id(target->cm_id);
		target->cm_id = NULL;
	}

	if (dev->use_fast_reg) {
		if (target->fr_pool)
			srp_destroy_fr_pool(target->fr_pool);
	} else {
		if (target->fmr_pool)
			ib_destroy_fmr_pool(target->fmr_pool);
	}
	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	if (target->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->rx_ring[i]);
		kfree(target->rx_ring);
		target->rx_ring = NULL;
	}
	if (target->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->tx_ring[i]);
		kfree(target->tx_ring);
		target->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

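/**
 * srp_lookup_path() - look up an IB path record for a target port
 * @target: SRP target port.
 *
 * Issues an SA path record query and waits for its completion. Returns zero
 * on success, the negative query status on failure, and the result of
 * wait_for_completion_interruptible() if the wait was interrupted.
 */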
static int srp_lookup_path(struct srp_target_port *target)
{
	int ret;

	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID	     |
						   IB_SA_PATH_REC_SGID	     |
						   IB_SA_PATH_REC_NUMB_PATH  |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	ret = wait_for_completion_interruptible(&target->done);
	if (ret < 0)
		return ret;

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

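/**
 * srp_send_req() - send an SRP_LOGIN_REQ via the IB CM
 * @target: SRP target port.
 *
 * Builds the CM REQ parameters and the SRP login request, including the
 * initiator and target port identifiers, and hands them to ib_send_cm_req().
 */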
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

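/**
 * srp_disconnect_target() - disconnect from an SRP target
 * @target: SRP target port.
 *
 * Marks the port as disconnected and, if it was connected, initiates a CM
 * disconnect by sending a DREQ.
 */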
static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!target->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(target->req_ring);
	target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&target->free_reqs);

	target->req_ring = kzalloc(target->req_ring_size *
				   sizeof(*target->req_ring), GFP_KERNEL);
	if (!target->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&target->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			target->status = -ECONNRESET;
			return target->status;

		default:
			return target->status;
		}
	}
}

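/**
 * srp_inv_rkey() - post a local invalidate work request for an rkey
 * @target: SRP target port.
 * @rkey:   rkey of the memory region to invalidate.
 */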
static int srp_inv_rkey(struct srp_target_port *target, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(target, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(target->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @target: SRP target port.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, struct scsi_device *sdev,
			   int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

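/**
 * srp_terminate_io() - fail all outstanding requests of an rport
 * @rport: SRP rport whose I/O should be terminated.
 *
 * Finishes every request in the request ring with DID_TRANSPORT_FAILFAST.
 * Intended to be invoked through the SRP transport class once the transport
 * layer has given up on the connection (see also srp_tl_err_work() below).
 */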
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_RESET << 16);
	}

	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all callback functions for the old QP have
	 * finished before any send requests are posted on the new QP.
	 */
	ret += srp_create_target_ib(target);

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

1150
8f26c9ff
DD
1151static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1152 unsigned int dma_len, u32 rkey)
f5358a17 1153{
8f26c9ff 1154 struct srp_direct_buf *desc = state->desc;
f5358a17 1155
8f26c9ff
DD
1156 desc->va = cpu_to_be64(dma_addr);
1157 desc->key = cpu_to_be32(rkey);
1158 desc->len = cpu_to_be32(dma_len);
f5358a17 1159
8f26c9ff
DD
1160 state->total_len += dma_len;
1161 state->desc++;
1162 state->ndesc++;
1163}
559ce8f1 1164
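/**
 * srp_map_finish_fmr() - map the pages accumulated so far through an FMR
 * @state:  Mapping state with the pages to map.
 * @target: SRP target port.
 *
 * Obtains a mapping from the FMR pool and appends the corresponding memory
 * descriptor via srp_map_desc().
 */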
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

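/**
 * srp_map_finish_fr() - map the pages accumulated so far via fast registration
 * @state:  Mapping state with the pages to map.
 * @target: SRP target port.
 *
 * Takes a descriptor from the FR pool, bumps its rkey and posts an
 * IB_WR_FAST_REG_MR work request for the accumulated page list.
 */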
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(target->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
			srp_map_finish_fr(state, target) :
			srp_map_finish_fmr(state, target);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

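/**
 * srp_map_sg_entry() - map a single scatterlist entry
 * @state:    Mapping state to update.
 * @target:   SRP target port.
 * @sg:       Scatterlist entry to map.
 * @sg_index: Index of @sg in the scatterlist.
 * @use_mr:   Whether to map through an FMR/FR memory region or to generate
 *            a direct descriptor.
 *
 * Splits the entry into mr_page_size pages and appends them to the mapping
 * state, closing out a memory region whenever it is full or whenever a page
 * offset would make merging impossible.
 */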
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * Since not all RDMA HW drivers support non-zero page offsets for
	 * FMR, if we start at an offset into a page, don't merge into the
	 * current FMR mapping. Finish it out, and use the kernel's MR for
	 * this sg entry.
	 */
	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
	    dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

static int srp_map_sg(struct srp_map_state *state,
		      struct srp_target_port *target, struct srp_request *req,
		      struct scatterlist *scat, int count)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc	= req->indirect_desc;
	state->pages	= req->map_page;
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		use_mr = !!target->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!target->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, target, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_mr && srp_finish_mapping(state, target))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}

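/**
 * srp_map_data() - map the data buffer of a SCSI command
 * @scmnd:  SCSI command to map data for.
 * @target: SRP target port.
 * @req:    SRP request that corresponds to @scmnd.
 *
 * Maps the scatterlist of @scmnd and builds either a direct or an indirect
 * data buffer descriptor in the SRP_CMD information unit. Returns the length
 * of the resulting SRP_CMD IU or a negative error code.
 */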
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, target, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

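/**
 * srp_process_rsp() - process an SRP_RSP information unit
 * @target: SRP target port.
 * @rsp:    Received SRP_RSP IU.
 *
 * For task management responses the tsk_mgmt_done completion is triggered;
 * for regular commands the matching SCSI command is claimed, its sense data
 * and residual counts are copied over, and scsi_done() is invoked.
 */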
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

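/**
 * srp_response_common() - send a response IU back to the target
 * @target:    SRP target port.
 * @req_delta: Request limit delta reported by the target.
 * @rsp:       Response IU payload to send.
 * @len:       Length of @rsp in bytes.
 *
 * Common helper for answering SRP_CRED_REQ and SRP_AER_REQ IUs. Returns zero
 * on success and a nonzero value otherwise.
 */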
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

aef9ec39
RD
1726static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1727{
dcb4cb85 1728 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1729 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1730 int res;
aef9ec39
RD
1731 u8 opcode;
1732
85507bcc
RC
1733 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1734 DMA_FROM_DEVICE);
aef9ec39
RD
1735
1736 opcode = *(u8 *) iu->buf;
1737
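/* Debugging aid: change "if (0)" to "if (1)" to hex-dump every received IU. */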
1738 if (0) {
7aa54bd7
DD
1739 shost_printk(KERN_ERR, target->scsi_host,
1740 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1741 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1742 iu->buf, wc->byte_len, true);
aef9ec39
RD
1743 }
1744
1745 switch (opcode) {
1746 case SRP_RSP:
1747 srp_process_rsp(target, iu->buf);
1748 break;
1749
bb12588a
DD
1750 case SRP_CRED_REQ:
1751 srp_process_cred_req(target, iu->buf);
1752 break;
1753
1754 case SRP_AER_REQ:
1755 srp_process_aer_req(target, iu->buf);
1756 break;
1757
aef9ec39
RD
1758 case SRP_T_LOGOUT:
1759 /* XXX Handle target logout */
7aa54bd7
DD
1760 shost_printk(KERN_WARNING, target->scsi_host,
1761 PFX "Got target logout request\n");
aef9ec39
RD
1762 break;
1763
1764 default:
7aa54bd7
DD
1765 shost_printk(KERN_WARNING, target->scsi_host,
1766 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1767 break;
1768 }
1769
85507bcc
RC
1770 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1771 DMA_FROM_DEVICE);
c996bb47 1772
dcb4cb85 1773 res = srp_post_recv(target, iu);
c996bb47
BVA
1774 if (res != 0)
1775 shost_printk(KERN_ERR, target->scsi_host,
1776 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1777}
1778
c1120f89
BVA
1779/**
1780 * srp_tl_err_work() - handle a transport layer error
af24663b 1781 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1782 *
1783 * Note: This function may get invoked before the rport has been created,
1784 * hence the target->rport test.
1785 */
1786static void srp_tl_err_work(struct work_struct *work)
1787{
1788 struct srp_target_port *target;
1789
1790 target = container_of(work, struct srp_target_port, tl_err_work);
1791 if (target->rport)
1792 srp_start_tl_fail_timers(target->rport);
1793}
1794
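/*
 * srp_handle_qp_err() - classify and report a failed work completion.
 *
 * The wr_id bits (LOCAL_INV_WR_ID_MASK / FAST_REG_WR_ID_MASK) tell
 * memory-registration work requests apart from ordinary send and receive
 * IUs; any error on a live connection schedules the transport-layer
 * failure timers through tl_err_work.
 */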
5cfb1782
BVA
1795static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1796 bool send_err, struct srp_target_port *target)
948d1e88 1797{
294c875a 1798 if (target->connected && !target->qp_in_error) {
5cfb1782
BVA
1799 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1800 shost_printk(KERN_ERR, target->scsi_host, PFX
1801 "LOCAL_INV failed with status %d\n",
1802 wc_status);
1803 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1804 shost_printk(KERN_ERR, target->scsi_host, PFX
1805 "FAST_REG_MR failed status %d\n",
1806 wc_status);
1807 } else {
1808 shost_printk(KERN_ERR, target->scsi_host,
1809 PFX "failed %s status %d for iu %p\n",
1810 send_err ? "send" : "receive",
1811 wc_status, (void *)(uintptr_t)wr_id);
1812 }
c1120f89 1813 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 1814 }
948d1e88
BVA
1815 target->qp_in_error = true;
1816}
1817
9c03dc9f 1818static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
aef9ec39
RD
1819{
1820 struct srp_target_port *target = target_ptr;
1821 struct ib_wc wc;
aef9ec39
RD
1822
1823 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1824 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1825 if (likely(wc.status == IB_WC_SUCCESS)) {
1826 srp_handle_recv(target, &wc);
1827 } else {
5cfb1782 1828 srp_handle_qp_err(wc.wr_id, wc.status, false, target);
aef9ec39 1829 }
9c03dc9f
BVA
1830 }
1831}
1832
1833static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1834{
1835 struct srp_target_port *target = target_ptr;
1836 struct ib_wc wc;
dcb4cb85 1837 struct srp_iu *iu;
9c03dc9f
BVA
1838
1839 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1840 if (likely(wc.status == IB_WC_SUCCESS)) {
1841 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1842 list_add(&iu->list, &target->free_tx);
1843 } else {
5cfb1782 1844 srp_handle_qp_err(wc.wr_id, wc.status, true, target);
9c03dc9f 1845 }
aef9ec39
RD
1846 }
1847}
1848
76c75b25 1849static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 1850{
76c75b25 1851 struct srp_target_port *target = host_to_target(shost);
a95cadb9 1852 struct srp_rport *rport = target->rport;
aef9ec39
RD
1853 struct srp_request *req;
1854 struct srp_iu *iu;
1855 struct srp_cmd *cmd;
85507bcc 1856 struct ib_device *dev;
76c75b25 1857 unsigned long flags;
d1b4289e 1858 int len, ret;
a95cadb9
BVA
1859 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1860
1861 /*
1862 * The SCSI EH thread is the only context from which srp_queuecommand()
1863 * can get invoked for blocked devices (SDEV_BLOCK /
1864 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1865 * locking the rport mutex if invoked from inside the SCSI EH.
1866 */
1867 if (in_scsi_eh)
1868 mutex_lock(&rport->mutex);
aef9ec39 1869
d1b4289e
BVA
1870 scmnd->result = srp_chkready(target->rport);
1871 if (unlikely(scmnd->result))
1872 goto err;
2ce19e72 1873
e9684678 1874 spin_lock_irqsave(&target->lock, flags);
bb12588a 1875 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
aef9ec39 1876 if (!iu)
695b8349
BVA
1877 goto err_unlock;
1878
1879 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1880 list_del(&req->list);
1881 spin_unlock_irqrestore(&target->lock, flags);
aef9ec39 1882
05321937 1883 dev = target->srp_host->srp_dev->dev;
49248644 1884 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 1885 DMA_TO_DEVICE);
aef9ec39 1886
f8b6e31e 1887 scmnd->host_scribble = (void *) req;
aef9ec39
RD
1888
1889 cmd = iu->buf;
1890 memset(cmd, 0, sizeof *cmd);
1891
1892 cmd->opcode = SRP_CMD;
1893 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
d945e1df 1894 cmd->tag = req->index;
aef9ec39
RD
1895 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1896
aef9ec39
RD
1897 req->scmnd = scmnd;
1898 req->cmd = iu;
aef9ec39
RD
1899
1900 len = srp_map_data(scmnd, target, req);
1901 if (len < 0) {
7aa54bd7 1902 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
1903 PFX "Failed to map data (%d)\n", len);
1904 /*
1905 * If we ran out of memory descriptors (-ENOMEM) because an
1906 * application is queuing many requests with more than
52ede08f 1907 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
1908 * to reduce queue depth temporarily.
1909 */
1910 scmnd->result = len == -ENOMEM ?
1911 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 1912 goto err_iu;
aef9ec39
RD
1913 }
1914
49248644 1915 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 1916 DMA_TO_DEVICE);
aef9ec39 1917
76c75b25 1918 if (srp_post_send(target, iu, len)) {
7aa54bd7 1919 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
1920 goto err_unmap;
1921 }
1922
d1b4289e
BVA
1923 ret = 0;
1924
a95cadb9
BVA
1925unlock_rport:
1926 if (in_scsi_eh)
1927 mutex_unlock(&rport->mutex);
1928
d1b4289e 1929 return ret;
aef9ec39
RD
1930
1931err_unmap:
1932 srp_unmap_data(scmnd, target, req);
1933
76c75b25
BVA
1934err_iu:
1935 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1936
024ca901
BVA
1937 /*
1938 * Prevent the loops that iterate over the request ring from
1939 * encountering a dangling SCSI command pointer.
1940 */
1941 req->scmnd = NULL;
1942
e9684678 1943 spin_lock_irqsave(&target->lock, flags);
76c75b25 1944 list_add(&req->list, &target->free_reqs);
695b8349
BVA
1945
1946err_unlock:
e9684678 1947 spin_unlock_irqrestore(&target->lock, flags);
76c75b25 1948
d1b4289e
BVA
1949err:
1950 if (scmnd->result) {
1951 scmnd->scsi_done(scmnd);
1952 ret = 0;
1953 } else {
1954 ret = SCSI_MLQUEUE_HOST_BUSY;
1955 }
a95cadb9 1956
d1b4289e 1957 goto unlock_rport;
aef9ec39
RD
1958}
1959
4d73f95f
BVA
1960/*
1961 * Note: the resources allocated in this function are freed in
1962 * srp_free_target_ib().
1963 */
aef9ec39
RD
1964static int srp_alloc_iu_bufs(struct srp_target_port *target)
1965{
1966 int i;
1967
4d73f95f
BVA
1968 target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
1969 GFP_KERNEL);
1970 if (!target->rx_ring)
1971 goto err_no_ring;
1972 target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
1973 GFP_KERNEL);
1974 if (!target->tx_ring)
1975 goto err_no_ring;
1976
1977 for (i = 0; i < target->queue_size; ++i) {
aef9ec39
RD
1978 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1979 target->max_ti_iu_len,
1980 GFP_KERNEL, DMA_FROM_DEVICE);
1981 if (!target->rx_ring[i])
1982 goto err;
1983 }
1984
4d73f95f 1985 for (i = 0; i < target->queue_size; ++i) {
aef9ec39 1986 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
49248644 1987 target->max_iu_len,
aef9ec39
RD
1988 GFP_KERNEL, DMA_TO_DEVICE);
1989 if (!target->tx_ring[i])
1990 goto err;
dcb4cb85
BVA
1991
1992 list_add(&target->tx_ring[i]->list, &target->free_tx);
aef9ec39
RD
1993 }
1994
1995 return 0;
1996
1997err:
4d73f95f 1998 for (i = 0; i < target->queue_size; ++i) {
aef9ec39 1999 srp_free_iu(target->srp_host, target->rx_ring[i]);
aef9ec39 2000 srp_free_iu(target->srp_host, target->tx_ring[i]);
aef9ec39
RD
2001 }
2002
4d73f95f
BVA
2003
2004err_no_ring:
2005 kfree(target->tx_ring);
2006 target->tx_ring = NULL;
2007 kfree(target->rx_ring);
2008 target->rx_ring = NULL;
2009
aef9ec39
RD
2010 return -ENOMEM;
2011}
2012
c9b03c1a
BVA
2013static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2014{
2015 uint64_t T_tr_ns, max_compl_time_ms;
2016 uint32_t rq_tmo_jiffies;
2017
2018 /*
2019 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2020 * table 91), both the QP timeout and the retry count have to be set
2021 * for RC QPs during the RTR to RTS transition.
2022 */
2023 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2024 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2025
2026 /*
2027 * Set target->rq_tmo_jiffies to one second more than the largest time
2028 * it can take before an error completion is generated. See also
2029 * C9-140..142 in the IBTA spec for more information about how to
2030 * convert the QP Local ACK Timeout value to nanoseconds.
2031 */
2032 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2033 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2034 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2035 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2036
2037 return rq_tmo_jiffies;
2038}
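/*
 * Worked example (hypothetical QP attributes): with qp_attr->timeout = 19
 * and qp_attr->retry_cnt = 7, T_tr = 4096 * 2^19 ns ~= 2.15 s, so the
 * largest possible completion delay is 7 * 4 * 2.15 s ~= 60 s and the
 * resulting rq_tmo_jiffies corresponds to roughly 61 seconds.
 */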
2039
961e0be8
DD
2040static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2041 struct srp_login_rsp *lrsp,
2042 struct srp_target_port *target)
2043{
2044 struct ib_qp_attr *qp_attr = NULL;
2045 int attr_mask = 0;
2046 int ret;
2047 int i;
2048
2049 if (lrsp->opcode == SRP_LOGIN_RSP) {
2050 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2051 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2052
2053 /*
2054 * Reserve credits for task management so we don't
2055 * bounce requests back to the SCSI mid-layer.
2056 */
2057 target->scsi_host->can_queue
2058 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2059 target->scsi_host->can_queue);
4d73f95f
BVA
2060 target->scsi_host->cmd_per_lun
2061 = min_t(int, target->scsi_host->can_queue,
2062 target->scsi_host->cmd_per_lun);
961e0be8
DD
2063 } else {
2064 shost_printk(KERN_WARNING, target->scsi_host,
2065 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2066 ret = -ECONNRESET;
2067 goto error;
2068 }
2069
4d73f95f 2070 if (!target->rx_ring) {
961e0be8
DD
2071 ret = srp_alloc_iu_bufs(target);
2072 if (ret)
2073 goto error;
2074 }
2075
2076 ret = -ENOMEM;
2077 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2078 if (!qp_attr)
2079 goto error;
2080
2081 qp_attr->qp_state = IB_QPS_RTR;
2082 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2083 if (ret)
2084 goto error_free;
2085
2086 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
2087 if (ret)
2088 goto error_free;
2089
4d73f95f 2090 for (i = 0; i < target->queue_size; i++) {
961e0be8
DD
2091 struct srp_iu *iu = target->rx_ring[i];
2092 ret = srp_post_recv(target, iu);
2093 if (ret)
2094 goto error_free;
2095 }
2096
2097 qp_attr->qp_state = IB_QPS_RTS;
2098 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2099 if (ret)
2100 goto error_free;
2101
c9b03c1a
BVA
2102 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2103
961e0be8
DD
2104 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
2105 if (ret)
2106 goto error_free;
2107
2108 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2109
2110error_free:
2111 kfree(qp_attr);
2112
2113error:
2114 target->status = ret;
2115}
2116
aef9ec39
RD
2117static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2118 struct ib_cm_event *event,
2119 struct srp_target_port *target)
2120{
7aa54bd7 2121 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2122 struct ib_class_port_info *cpi;
2123 int opcode;
2124
2125 switch (event->param.rej_rcvd.reason) {
2126 case IB_CM_REJ_PORT_CM_REDIRECT:
2127 cpi = event->param.rej_rcvd.ari;
2128 target->path.dlid = cpi->redirect_lid;
2129 target->path.pkey = cpi->redirect_pkey;
2130 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2131 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
2132
2133 target->status = target->path.dlid ?
2134 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2135 break;
2136
2137 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2138 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2139 /*
2140 * Topspin/Cisco SRP gateways incorrectly send
2141 * reject reason code 25 when they mean 24
2142 * (port redirect).
2143 */
2144 memcpy(target->path.dgid.raw,
2145 event->param.rej_rcvd.ari, 16);
2146
7aa54bd7
DD
2147 shost_printk(KERN_DEBUG, shost,
2148 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2149 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
2150 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
aef9ec39
RD
2151
2152 target->status = SRP_PORT_REDIRECT;
2153 } else {
7aa54bd7
DD
2154 shost_printk(KERN_WARNING, shost,
2155 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
aef9ec39
RD
2156 target->status = -ECONNRESET;
2157 }
2158 break;
2159
2160 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2161 shost_printk(KERN_WARNING, shost,
2162 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
aef9ec39
RD
2163 target->status = -ECONNRESET;
2164 break;
2165
2166 case IB_CM_REJ_CONSUMER_DEFINED:
2167 opcode = *(u8 *) event->private_data;
2168 if (opcode == SRP_LOGIN_REJ) {
2169 struct srp_login_rej *rej = event->private_data;
2170 u32 reason = be32_to_cpu(rej->reason);
2171
2172 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2173 shost_printk(KERN_WARNING, shost,
2174 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2175 else
e7ffde01
BVA
2176 shost_printk(KERN_WARNING, shost, PFX
2177 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2178 target->path.sgid.raw,
2179 target->orig_dgid, reason);
aef9ec39 2180 } else
7aa54bd7
DD
2181 shost_printk(KERN_WARNING, shost,
2182 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2183 " opcode 0x%02x\n", opcode);
aef9ec39
RD
2184 target->status = -ECONNRESET;
2185 break;
2186
9fe4bcf4
DD
2187 case IB_CM_REJ_STALE_CONN:
2188 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2189 target->status = SRP_STALE_CONN;
2190 break;
2191
aef9ec39 2192 default:
7aa54bd7
DD
2193 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2194 event->param.rej_rcvd.reason);
aef9ec39
RD
2195 target->status = -ECONNRESET;
2196 }
2197}
2198
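/*
 * srp_cm_handler() - dispatch IB CM events for one target port.
 *
 * REQ errors, REP and REJ set target->status and complete target->done
 * so that the connect path waiting on that completion can observe the
 * outcome; a DREQ from the target marks the connection closed and kicks
 * off the transport-layer error timers.
 */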
2199static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2200{
2201 struct srp_target_port *target = cm_id->context;
aef9ec39 2202 int comp = 0;
aef9ec39
RD
2203
2204 switch (event->event) {
2205 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2206 shost_printk(KERN_DEBUG, target->scsi_host,
2207 PFX "Sending CM REQ failed\n");
aef9ec39
RD
2208 comp = 1;
2209 target->status = -ECONNRESET;
2210 break;
2211
2212 case IB_CM_REP_RECEIVED:
2213 comp = 1;
961e0be8 2214 srp_cm_rep_handler(cm_id, event->private_data, target);
aef9ec39
RD
2215 break;
2216
2217 case IB_CM_REJ_RECEIVED:
7aa54bd7 2218 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2219 comp = 1;
2220
2221 srp_cm_rej_handler(cm_id, event, target);
2222 break;
2223
b7ac4ab4 2224 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2225 shost_printk(KERN_WARNING, target->scsi_host,
2226 PFX "DREQ received - connection closed\n");
294c875a 2227 srp_change_conn_state(target, false);
b7ac4ab4 2228 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2229 shost_printk(KERN_ERR, target->scsi_host,
2230 PFX "Sending CM DREP failed\n");
c1120f89 2231 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2232 break;
2233
2234 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2235 shost_printk(KERN_ERR, target->scsi_host,
2236 PFX "connection closed\n");
ac72d766 2237 comp = 1;
aef9ec39 2238
aef9ec39
RD
2239 target->status = 0;
2240 break;
2241
b7ac4ab4
IR
2242 case IB_CM_MRA_RECEIVED:
2243 case IB_CM_DREQ_ERROR:
2244 case IB_CM_DREP_RECEIVED:
2245 break;
2246
aef9ec39 2247 default:
7aa54bd7
DD
2248 shost_printk(KERN_WARNING, target->scsi_host,
2249 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2250 break;
2251 }
2252
2253 if (comp)
2254 complete(&target->done);
2255
aef9ec39
RD
2256 return 0;
2257}
2258
71444b97
JW
2259/**
2260 * srp_change_queue_depth() - set device queue depth
2261 * @sdev: scsi device struct
2262 * @qdepth: requested queue depth
2263 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2264 * (see include/scsi/scsi_host.h for definition)
2265 *
2266 * Returns queue depth.
2267 */
2268static int
2269srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2270{
2271 struct Scsi_Host *shost = sdev->host;
2272 int max_depth;
2273 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
2274 max_depth = shost->can_queue;
2275 if (!sdev->tagged_supported)
2276 max_depth = 1;
2277 if (qdepth > max_depth)
2278 qdepth = max_depth;
c8b09f6f 2279 scsi_adjust_queue_depth(sdev, qdepth);
71444b97
JW
2280 } else if (reason == SCSI_QDEPTH_QFULL)
2281 scsi_track_queue_full(sdev, qdepth);
2282 else
2283 return -EOPNOTSUPP;
2284
2285 return sdev->queue_depth;
2286}
2287
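/*
 * srp_send_tsk_mgmt() - post an SRP task management request (for example
 * SRP_TSK_ABORT_TASK or SRP_TSK_LUN_RESET) and wait up to
 * SRP_ABORT_TIMEOUT_MS for the target to answer.
 */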
d945e1df 2288static int srp_send_tsk_mgmt(struct srp_target_port *target,
f8b6e31e 2289 u64 req_tag, unsigned int lun, u8 func)
aef9ec39 2290{
a95cadb9 2291 struct srp_rport *rport = target->rport;
19081f31 2292 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2293 struct srp_iu *iu;
2294 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2295
3780d1f0
BVA
2296 if (!target->connected || target->qp_in_error)
2297 return -1;
2298
f8b6e31e 2299 init_completion(&target->tsk_mgmt_done);
aef9ec39 2300
a95cadb9
BVA
2301 /*
2302 * Lock the rport mutex to prevent srp_create_target_ib() from being
2303 * invoked while a task management function is being sent.
2304 */
2305 mutex_lock(&rport->mutex);
e9684678 2306 spin_lock_irq(&target->lock);
bb12588a 2307 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
e9684678 2308 spin_unlock_irq(&target->lock);
76c75b25 2309
a95cadb9
BVA
2310 if (!iu) {
2311 mutex_unlock(&rport->mutex);
2312
76c75b25 2313 return -1;
a95cadb9 2314 }
aef9ec39 2315
19081f31
DD
2316 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2317 DMA_TO_DEVICE);
aef9ec39
RD
2318 tsk_mgmt = iu->buf;
2319 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2320
2321 tsk_mgmt->opcode = SRP_TSK_MGMT;
f8b6e31e
DD
2322 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2323 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2324 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2325 tsk_mgmt->task_tag = req_tag;
aef9ec39 2326
19081f31
DD
2327 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2328 DMA_TO_DEVICE);
76c75b25
BVA
2329 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
2330 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2331 mutex_unlock(&rport->mutex);
2332
76c75b25
BVA
2333 return -1;
2334 }
a95cadb9 2335 mutex_unlock(&rport->mutex);
d945e1df 2336
f8b6e31e 2337 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
aef9ec39 2338 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2339 return -1;
aef9ec39 2340
d945e1df 2341 return 0;
d945e1df
RD
2342}
2343
aef9ec39
RD
2344static int srp_abort(struct scsi_cmnd *scmnd)
2345{
d945e1df 2346 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2347 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
086f44f5 2348 int ret;
d945e1df 2349
7aa54bd7 2350 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2351
b3fe628d 2352 if (!req || !srp_claim_req(target, req, NULL, scmnd))
99b6697a 2353 return SUCCESS;
086f44f5 2354 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
80d5e8a2 2355 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2356 ret = SUCCESS;
ed9b2264 2357 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2358 ret = FAST_IO_FAIL;
086f44f5
BVA
2359 else
2360 ret = FAILED;
22032991
BVA
2361 srp_free_req(target, req, scmnd, 0);
2362 scmnd->result = DID_ABORT << 16;
d8536670 2363 scmnd->scsi_done(scmnd);
d945e1df 2364
086f44f5 2365 return ret;
aef9ec39
RD
2366}
2367
2368static int srp_reset_device(struct scsi_cmnd *scmnd)
2369{
d945e1df 2370 struct srp_target_port *target = host_to_target(scmnd->device->host);
536ae14e 2371 int i;
d945e1df 2372
7aa54bd7 2373 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2374
f8b6e31e
DD
2375 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
2376 SRP_TSK_LUN_RESET))
d945e1df 2377 return FAILED;
f8b6e31e 2378 if (target->tsk_mgmt_status)
d945e1df
RD
2379 return FAILED;
2380
4d73f95f 2381 for (i = 0; i < target->req_ring_size; ++i) {
536ae14e 2382 struct srp_request *req = &target->req_ring[i];
b3fe628d 2383 srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
536ae14e 2384 }
d945e1df 2385
d945e1df 2386 return SUCCESS;
aef9ec39
RD
2387}
2388
2389static int srp_reset_host(struct scsi_cmnd *scmnd)
2390{
2391 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2392
7aa54bd7 2393 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2394
ed9b2264 2395 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2396}
2397
c9b03c1a
BVA
2398static int srp_slave_configure(struct scsi_device *sdev)
2399{
2400 struct Scsi_Host *shost = sdev->host;
2401 struct srp_target_port *target = host_to_target(shost);
2402 struct request_queue *q = sdev->request_queue;
2403 unsigned long timeout;
2404
2405 if (sdev->type == TYPE_DISK) {
2406 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2407 blk_queue_rq_timeout(q, timeout);
2408 }
2409
2410 return 0;
2411}
2412
ee959b00
TJ
2413static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2414 char *buf)
6ecb0c84 2415{
ee959b00 2416 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2417
6ecb0c84
RD
2418 return sprintf(buf, "0x%016llx\n",
2419 (unsigned long long) be64_to_cpu(target->id_ext));
2420}
2421
ee959b00
TJ
2422static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2423 char *buf)
6ecb0c84 2424{
ee959b00 2425 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2426
6ecb0c84
RD
2427 return sprintf(buf, "0x%016llx\n",
2428 (unsigned long long) be64_to_cpu(target->ioc_guid));
2429}
2430
ee959b00
TJ
2431static ssize_t show_service_id(struct device *dev,
2432 struct device_attribute *attr, char *buf)
6ecb0c84 2433{
ee959b00 2434 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2435
6ecb0c84
RD
2436 return sprintf(buf, "0x%016llx\n",
2437 (unsigned long long) be64_to_cpu(target->service_id));
2438}
2439
ee959b00
TJ
2440static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2441 char *buf)
6ecb0c84 2442{
ee959b00 2443 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2444
6ecb0c84
RD
2445 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
2446}
2447
848b3082
BVA
2448static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2449 char *buf)
2450{
2451 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2452
2453 return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2454}
2455
ee959b00
TJ
2456static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2457 char *buf)
6ecb0c84 2458{
ee959b00 2459 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2460
5b095d98 2461 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
6ecb0c84
RD
2462}
2463
ee959b00
TJ
2464static ssize_t show_orig_dgid(struct device *dev,
2465 struct device_attribute *attr, char *buf)
3633b3d0 2466{
ee959b00 2467 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2468
5b095d98 2469 return sprintf(buf, "%pI6\n", target->orig_dgid);
3633b3d0
IR
2470}
2471
89de7486
BVA
2472static ssize_t show_req_lim(struct device *dev,
2473 struct device_attribute *attr, char *buf)
2474{
2475 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2476
89de7486
BVA
2477 return sprintf(buf, "%d\n", target->req_lim);
2478}
2479
ee959b00
TJ
2480static ssize_t show_zero_req_lim(struct device *dev,
2481 struct device_attribute *attr, char *buf)
6bfa24fa 2482{
ee959b00 2483 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2484
6bfa24fa
RD
2485 return sprintf(buf, "%d\n", target->zero_req_lim);
2486}
2487
ee959b00
TJ
2488static ssize_t show_local_ib_port(struct device *dev,
2489 struct device_attribute *attr, char *buf)
ded7f1a1 2490{
ee959b00 2491 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2492
2493 return sprintf(buf, "%d\n", target->srp_host->port);
2494}
2495
ee959b00
TJ
2496static ssize_t show_local_ib_device(struct device *dev,
2497 struct device_attribute *attr, char *buf)
ded7f1a1 2498{
ee959b00 2499 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2500
05321937 2501 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2502}
2503
4b5e5f41
BVA
2504static ssize_t show_comp_vector(struct device *dev,
2505 struct device_attribute *attr, char *buf)
2506{
2507 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2508
2509 return sprintf(buf, "%d\n", target->comp_vector);
2510}
2511
7bb312e4
VP
2512static ssize_t show_tl_retry_count(struct device *dev,
2513 struct device_attribute *attr, char *buf)
2514{
2515 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2516
2517 return sprintf(buf, "%d\n", target->tl_retry_count);
2518}
2519
49248644
DD
2520static ssize_t show_cmd_sg_entries(struct device *dev,
2521 struct device_attribute *attr, char *buf)
2522{
2523 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2524
2525 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2526}
2527
c07d424d
DD
2528static ssize_t show_allow_ext_sg(struct device *dev,
2529 struct device_attribute *attr, char *buf)
2530{
2531 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2532
2533 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2534}
2535
ee959b00
TJ
2536static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2537static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2538static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2539static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2540static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2541static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2542static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2543static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2544static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2545static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2546static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
4b5e5f41 2547static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2548static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2549static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2550static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2551
2552static struct device_attribute *srp_host_attrs[] = {
2553 &dev_attr_id_ext,
2554 &dev_attr_ioc_guid,
2555 &dev_attr_service_id,
2556 &dev_attr_pkey,
848b3082 2557 &dev_attr_sgid,
ee959b00
TJ
2558 &dev_attr_dgid,
2559 &dev_attr_orig_dgid,
89de7486 2560 &dev_attr_req_lim,
ee959b00
TJ
2561 &dev_attr_zero_req_lim,
2562 &dev_attr_local_ib_port,
2563 &dev_attr_local_ib_device,
4b5e5f41 2564 &dev_attr_comp_vector,
7bb312e4 2565 &dev_attr_tl_retry_count,
49248644 2566 &dev_attr_cmd_sg_entries,
c07d424d 2567 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2568 NULL
2569};
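/*
 * The attributes above show up in the sysfs directory of the SCSI host.
 * A minimal usage sketch (the host number is hypothetical):
 *
 *   cat /sys/class/scsi_host/host7/orig_dgid
 */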
2570
aef9ec39
RD
2571static struct scsi_host_template srp_template = {
2572 .module = THIS_MODULE,
b7f008fd
RD
2573 .name = "InfiniBand SRP initiator",
2574 .proc_name = DRV_NAME,
c9b03c1a 2575 .slave_configure = srp_slave_configure,
aef9ec39
RD
2576 .info = srp_target_info,
2577 .queuecommand = srp_queuecommand,
71444b97 2578 .change_queue_depth = srp_change_queue_depth,
a62182f3 2579 .change_queue_type = scsi_change_queue_type,
aef9ec39
RD
2580 .eh_abort_handler = srp_abort,
2581 .eh_device_reset_handler = srp_reset_device,
2582 .eh_host_reset_handler = srp_reset_host,
2742c1da 2583 .skip_settle_delay = true,
49248644 2584 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2585 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2586 .this_id = -1,
4d73f95f 2587 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84
RD
2588 .use_clustering = ENABLE_CLUSTERING,
2589 .shost_attrs = srp_host_attrs
aef9ec39
RD
2590};
2591
34aa654e
BVA
2592static int srp_sdev_count(struct Scsi_Host *host)
2593{
2594 struct scsi_device *sdev;
2595 int c = 0;
2596
2597 shost_for_each_device(sdev, host)
2598 c++;
2599
2600 return c;
2601}
2602
aef9ec39
RD
2603static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2604{
3236822b
FT
2605 struct srp_rport_identifiers ids;
2606 struct srp_rport *rport;
2607
34aa654e 2608 target->state = SRP_TARGET_SCANNING;
aef9ec39
RD
2609 sprintf(target->target_name, "SRP.T10:%016llX",
2610 (unsigned long long) be64_to_cpu(target->id_ext));
2611
05321937 2612 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2613 return -ENODEV;
2614
3236822b
FT
2615 memcpy(ids.port_id, &target->id_ext, 8);
2616 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2617 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2618 rport = srp_rport_add(target->scsi_host, &ids);
2619 if (IS_ERR(rport)) {
2620 scsi_remove_host(target->scsi_host);
2621 return PTR_ERR(rport);
2622 }
2623
dc1bdbd9 2624 rport->lld_data = target;
9dd69a60 2625 target->rport = rport;
dc1bdbd9 2626
b3589fd4 2627 spin_lock(&host->target_lock);
aef9ec39 2628 list_add_tail(&target->list, &host->target_list);
b3589fd4 2629 spin_unlock(&host->target_lock);
aef9ec39 2630
aef9ec39 2631 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2632 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2633
34aa654e
BVA
2634 if (!target->connected || target->qp_in_error) {
2635 shost_printk(KERN_INFO, target->scsi_host,
2636 PFX "SCSI scan failed - removing SCSI host\n");
2637 srp_queue_remove_work(target);
2638 goto out;
2639 }
2640
2641 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2642 dev_name(&target->scsi_host->shost_gendev),
2643 srp_sdev_count(target->scsi_host));
2644
2645 spin_lock_irq(&target->lock);
2646 if (target->state == SRP_TARGET_SCANNING)
2647 target->state = SRP_TARGET_LIVE;
2648 spin_unlock_irq(&target->lock);
2649
2650out:
aef9ec39
RD
2651 return 0;
2652}
2653
ee959b00 2654static void srp_release_dev(struct device *dev)
aef9ec39
RD
2655{
2656 struct srp_host *host =
ee959b00 2657 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2658
2659 complete(&host->released);
2660}
2661
2662static struct class srp_class = {
2663 .name = "infiniband_srp",
ee959b00 2664 .dev_release = srp_release_dev
aef9ec39
RD
2665};
2666
96fc248a
BVA
2667/**
2668 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2669 * @host: SRP host.
2670 * @target: SRP target port.
96fc248a
BVA
2671 */
2672static bool srp_conn_unique(struct srp_host *host,
2673 struct srp_target_port *target)
2674{
2675 struct srp_target_port *t;
2676 bool ret = false;
2677
2678 if (target->state == SRP_TARGET_REMOVED)
2679 goto out;
2680
2681 ret = true;
2682
2683 spin_lock(&host->target_lock);
2684 list_for_each_entry(t, &host->target_list, list) {
2685 if (t != target &&
2686 target->id_ext == t->id_ext &&
2687 target->ioc_guid == t->ioc_guid &&
2688 target->initiator_ext == t->initiator_ext) {
2689 ret = false;
2690 break;
2691 }
2692 }
2693 spin_unlock(&host->target_lock);
2694
2695out:
2696 return ret;
2697}
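/*
 * Uniqueness is keyed on the (id_ext, ioc_guid, initiator_ext) triple:
 * a new login that matches an existing target port on the same SRP host
 * is rejected by the caller with -EEXIST.
 */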
2698
aef9ec39
RD
2699/*
2700 * Target ports are added by writing
2701 *
2702 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2703 * pkey=<P_Key>,service_id=<service ID>
2704 *
2705 * to the add_target sysfs attribute.
2706 */
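/*
 * Example (every identifier below is made up):
 *
 *   echo "id_ext=200500A0B8112233,ioc_guid=00117500000000ab,dgid=fe8000000000000000117500000000ab,pkey=ffff,service_id=00117500000000ab" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */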
2707enum {
2708 SRP_OPT_ERR = 0,
2709 SRP_OPT_ID_EXT = 1 << 0,
2710 SRP_OPT_IOC_GUID = 1 << 1,
2711 SRP_OPT_DGID = 1 << 2,
2712 SRP_OPT_PKEY = 1 << 3,
2713 SRP_OPT_SERVICE_ID = 1 << 4,
2714 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2715 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2716 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2717 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2718 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2719 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2720 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2721 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2722 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2723 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2724 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2725 SRP_OPT_IOC_GUID |
2726 SRP_OPT_DGID |
2727 SRP_OPT_PKEY |
2728 SRP_OPT_SERVICE_ID),
2729};
2730
a447c093 2731static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2732 { SRP_OPT_ID_EXT, "id_ext=%s" },
2733 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2734 { SRP_OPT_DGID, "dgid=%s" },
2735 { SRP_OPT_PKEY, "pkey=%x" },
2736 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2737 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2738 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2739 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2740 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2741 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2742 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2743 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2744 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2745 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2746 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2747 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2748};
2749
2750static int srp_parse_options(const char *buf, struct srp_target_port *target)
2751{
2752 char *options, *sep_opt;
2753 char *p;
2754 char dgid[3];
2755 substring_t args[MAX_OPT_ARGS];
2756 int opt_mask = 0;
2757 int token;
2758 int ret = -EINVAL;
2759 int i;
2760
2761 options = kstrdup(buf, GFP_KERNEL);
2762 if (!options)
2763 return -ENOMEM;
2764
2765 sep_opt = options;
2766 while ((p = strsep(&sep_opt, ",")) != NULL) {
2767 if (!*p)
2768 continue;
2769
2770 token = match_token(p, srp_opt_tokens, args);
2771 opt_mask |= token;
2772
2773 switch (token) {
2774 case SRP_OPT_ID_EXT:
2775 p = match_strdup(args);
a20f3a6d
IR
2776 if (!p) {
2777 ret = -ENOMEM;
2778 goto out;
2779 }
aef9ec39
RD
2780 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2781 kfree(p);
2782 break;
2783
2784 case SRP_OPT_IOC_GUID:
2785 p = match_strdup(args);
a20f3a6d
IR
2786 if (!p) {
2787 ret = -ENOMEM;
2788 goto out;
2789 }
aef9ec39
RD
2790 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2791 kfree(p);
2792 break;
2793
2794 case SRP_OPT_DGID:
2795 p = match_strdup(args);
a20f3a6d
IR
2796 if (!p) {
2797 ret = -ENOMEM;
2798 goto out;
2799 }
aef9ec39 2800 if (strlen(p) != 32) {
e0bda7d8 2801 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 2802 kfree(p);
aef9ec39
RD
2803 goto out;
2804 }
2805
2806 for (i = 0; i < 16; ++i) {
2807 strlcpy(dgid, p + i * 2, 3);
2808 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2809 }
bf17c1c7 2810 kfree(p);
3633b3d0 2811 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
aef9ec39
RD
2812 break;
2813
2814 case SRP_OPT_PKEY:
2815 if (match_hex(args, &token)) {
e0bda7d8 2816 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
2817 goto out;
2818 }
2819 target->path.pkey = cpu_to_be16(token);
2820 break;
2821
2822 case SRP_OPT_SERVICE_ID:
2823 p = match_strdup(args);
a20f3a6d
IR
2824 if (!p) {
2825 ret = -ENOMEM;
2826 goto out;
2827 }
aef9ec39 2828 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
247e020e 2829 target->path.service_id = target->service_id;
aef9ec39
RD
2830 kfree(p);
2831 break;
2832
2833 case SRP_OPT_MAX_SECT:
2834 if (match_int(args, &token)) {
e0bda7d8 2835 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
2836 goto out;
2837 }
2838 target->scsi_host->max_sectors = token;
2839 break;
2840
4d73f95f
BVA
2841 case SRP_OPT_QUEUE_SIZE:
2842 if (match_int(args, &token) || token < 1) {
2843 pr_warn("bad queue_size parameter '%s'\n", p);
2844 goto out;
2845 }
2846 target->scsi_host->can_queue = token;
2847 target->queue_size = token + SRP_RSP_SQ_SIZE +
2848 SRP_TSK_MGMT_SQ_SIZE;
2849 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2850 target->scsi_host->cmd_per_lun = token;
2851 break;
2852
52fb2b50 2853 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 2854 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
2855 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2856 p);
52fb2b50
VP
2857 goto out;
2858 }
4d73f95f 2859 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
2860 break;
2861
0c0450db
R
2862 case SRP_OPT_IO_CLASS:
2863 if (match_hex(args, &token)) {
e0bda7d8 2864 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
2865 goto out;
2866 }
2867 if (token != SRP_REV10_IB_IO_CLASS &&
2868 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
2869 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2870 token, SRP_REV10_IB_IO_CLASS,
2871 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
2872 goto out;
2873 }
2874 target->io_class = token;
2875 break;
2876
01cb9bcb
IR
2877 case SRP_OPT_INITIATOR_EXT:
2878 p = match_strdup(args);
a20f3a6d
IR
2879 if (!p) {
2880 ret = -ENOMEM;
2881 goto out;
2882 }
01cb9bcb
IR
2883 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2884 kfree(p);
2885 break;
2886
49248644
DD
2887 case SRP_OPT_CMD_SG_ENTRIES:
2888 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
2889 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2890 p);
49248644
DD
2891 goto out;
2892 }
2893 target->cmd_sg_cnt = token;
2894 break;
2895
c07d424d
DD
2896 case SRP_OPT_ALLOW_EXT_SG:
2897 if (match_int(args, &token)) {
e0bda7d8 2898 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
2899 goto out;
2900 }
2901 target->allow_ext_sg = !!token;
2902 break;
2903
2904 case SRP_OPT_SG_TABLESIZE:
2905 if (match_int(args, &token) || token < 1 ||
2906 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
2907 pr_warn("bad max sg_tablesize parameter '%s'\n",
2908 p);
c07d424d
DD
2909 goto out;
2910 }
2911 target->sg_tablesize = token;
2912 break;
2913
4b5e5f41
BVA
2914 case SRP_OPT_COMP_VECTOR:
2915 if (match_int(args, &token) || token < 0) {
2916 pr_warn("bad comp_vector parameter '%s'\n", p);
2917 goto out;
2918 }
2919 target->comp_vector = token;
2920 break;
2921
7bb312e4
VP
2922 case SRP_OPT_TL_RETRY_COUNT:
2923 if (match_int(args, &token) || token < 2 || token > 7) {
2924 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2925 p);
2926 goto out;
2927 }
2928 target->tl_retry_count = token;
2929 break;
2930
aef9ec39 2931 default:
e0bda7d8
BVA
2932 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2933 p);
aef9ec39
RD
2934 goto out;
2935 }
2936 }
2937
2938 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2939 ret = 0;
2940 else
2941 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2942 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2943 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
2944 pr_warn("target creation request is missing parameter '%s'\n",
2945 srp_opt_tokens[i].pattern);
aef9ec39 2946
4d73f95f
BVA
2947 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
2948 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2949 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
2950 target->scsi_host->cmd_per_lun,
2951 target->scsi_host->can_queue);
2952
aef9ec39
RD
2953out:
2954 kfree(options);
2955 return ret;
2956}
2957
ee959b00
TJ
2958static ssize_t srp_create_target(struct device *dev,
2959 struct device_attribute *attr,
aef9ec39
RD
2960 const char *buf, size_t count)
2961{
2962 struct srp_host *host =
ee959b00 2963 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2964 struct Scsi_Host *target_host;
2965 struct srp_target_port *target;
d1b4289e
BVA
2966 struct srp_device *srp_dev = host->srp_dev;
2967 struct ib_device *ibdev = srp_dev->dev;
b81d00bd 2968 int ret;
aef9ec39
RD
2969
2970 target_host = scsi_host_alloc(&srp_template,
2971 sizeof (struct srp_target_port));
2972 if (!target_host)
2973 return -ENOMEM;
2974
49248644 2975 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
2976 target_host->max_channel = 0;
2977 target_host->max_id = 1;
3c8edf0e
AR
2978 target_host->max_lun = SRP_MAX_LUN;
2979 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 2980
aef9ec39 2981 target = host_to_target(target_host);
aef9ec39 2982
49248644
DD
2983 target->io_class = SRP_REV16A_IB_IO_CLASS;
2984 target->scsi_host = target_host;
2985 target->srp_host = host;
2986 target->lkey = host->srp_dev->mr->lkey;
2987 target->rkey = host->srp_dev->mr->rkey;
2988 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
2989 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
2990 target->allow_ext_sg = allow_ext_sg;
7bb312e4 2991 target->tl_retry_count = 7;
4d73f95f 2992 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 2993
34aa654e
BVA
2994 /*
2995 * Prevent the SCSI host from being removed by srp_remove_target()
2996 * before this function returns.
2997 */
2998 scsi_host_get(target->scsi_host);
2999
2d7091bc
BVA
3000 mutex_lock(&host->add_target_mutex);
3001
aef9ec39
RD
3002 ret = srp_parse_options(buf, target);
3003 if (ret)
3004 goto err;
3005
4d73f95f
BVA
3006 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3007
96fc248a
BVA
3008 if (!srp_conn_unique(target->srp_host, target)) {
3009 shost_printk(KERN_INFO, target->scsi_host,
3010 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3011 be64_to_cpu(target->id_ext),
3012 be64_to_cpu(target->ioc_guid),
3013 be64_to_cpu(target->initiator_ext));
3014 ret = -EEXIST;
3015 goto err;
3016 }
3017
5cfb1782 3018 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3019 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3020 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3021 target->sg_tablesize = target->cmd_sg_cnt;
3022 }
3023
3024 target_host->sg_tablesize = target->sg_tablesize;
3025 target->indirect_size = target->sg_tablesize *
3026 sizeof (struct srp_direct_buf);
49248644
DD
3027 target->max_iu_len = sizeof (struct srp_cmd) +
3028 sizeof (struct srp_indirect_buf) +
3029 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3030
c1120f89 3031 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3032 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff
DD
3033 spin_lock_init(&target->lock);
3034 INIT_LIST_HEAD(&target->free_tx);
b81d00bd
BVA
3035 ret = srp_alloc_req_data(target);
3036 if (ret)
3037 goto err_free_mem;
8f26c9ff 3038
2088ca66
SG
3039 ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
3040 if (ret)
3041 goto err_free_mem;
aef9ec39 3042
aef9ec39
RD
3043 ret = srp_create_target_ib(target);
3044 if (ret)
8f26c9ff 3045 goto err_free_mem;
aef9ec39 3046
9fe4bcf4
DD
3047 ret = srp_new_cm_id(target);
3048 if (ret)
8f26c9ff 3049 goto err_free_ib;
aef9ec39
RD
3050
3051 ret = srp_connect_target(target);
3052 if (ret) {
7aa54bd7
DD
3053 shost_printk(KERN_ERR, target->scsi_host,
3054 PFX "Connection failed\n");
394c595e 3055 goto err_free_ib;
aef9ec39
RD
3056 }
3057
3058 ret = srp_add_target(host, target);
3059 if (ret)
3060 goto err_disconnect;
3061
34aa654e
BVA
3062 if (target->state != SRP_TARGET_REMOVED) {
3063 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3064 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3065 be64_to_cpu(target->id_ext),
3066 be64_to_cpu(target->ioc_guid),
3067 be16_to_cpu(target->path.pkey),
3068 be64_to_cpu(target->service_id),
3069 target->path.sgid.raw, target->orig_dgid);
3070 }
e7ffde01 3071
2d7091bc
BVA
3072 ret = count;
3073
3074out:
3075 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3076
3077 scsi_host_put(target->scsi_host);
3078
2d7091bc 3079 return ret;
aef9ec39
RD
3080
3081err_disconnect:
3082 srp_disconnect_target(target);
3083
8f26c9ff 3084err_free_ib:
aef9ec39
RD
3085 srp_free_target_ib(target);
3086
8f26c9ff
DD
3087err_free_mem:
3088 srp_free_req_data(target);
3089
aef9ec39
RD
3090err:
3091 scsi_host_put(target_host);
2d7091bc 3092 goto out;
aef9ec39
RD
3093}
3094
ee959b00 3095static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3096
ee959b00
TJ
3097static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3098 char *buf)
aef9ec39 3099{
ee959b00 3100 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3101
05321937 3102 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3103}
3104
ee959b00 3105static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3106
ee959b00
TJ
3107static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3108 char *buf)
aef9ec39 3109{
ee959b00 3110 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3111
3112 return sprintf(buf, "%d\n", host->port);
3113}
3114
ee959b00 3115static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3116
f5358a17 3117static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3118{
3119 struct srp_host *host;
3120
3121 host = kzalloc(sizeof *host, GFP_KERNEL);
3122 if (!host)
3123 return NULL;
3124
3125 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3126 spin_lock_init(&host->target_lock);
aef9ec39 3127 init_completion(&host->released);
2d7091bc 3128 mutex_init(&host->add_target_mutex);
05321937 3129 host->srp_dev = device;
aef9ec39
RD
3130 host->port = port;
3131
ee959b00
TJ
3132 host->dev.class = &srp_class;
3133 host->dev.parent = device->dev->dma_device;
d927e38c 3134 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3135
ee959b00 3136 if (device_register(&host->dev))
f5358a17 3137 goto free_host;
ee959b00 3138 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3139 goto err_class;
ee959b00 3140 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3141 goto err_class;
ee959b00 3142 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3143 goto err_class;
3144
3145 return host;
3146
3147err_class:
ee959b00 3148 device_unregister(&host->dev);
aef9ec39 3149
f5358a17 3150free_host:
aef9ec39
RD
3151 kfree(host);
3152
3153 return NULL;
3154}
3155
3156static void srp_add_one(struct ib_device *device)
3157{
f5358a17
RD
3158 struct srp_device *srp_dev;
3159 struct ib_device_attr *dev_attr;
aef9ec39 3160 struct srp_host *host;
52ede08f
BVA
3161 int mr_page_shift, s, e, p;
3162 u64 max_pages_per_mr;
aef9ec39 3163
f5358a17
RD
3164 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3165 if (!dev_attr)
cf311cd4 3166 return;
aef9ec39 3167
f5358a17 3168 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3169 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3170 goto free_attr;
3171 }
3172
3173 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3174 if (!srp_dev)
3175 goto free_attr;
3176
d1b4289e
BVA
3177 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3178 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3179 srp_dev->has_fr = (dev_attr->device_cap_flags &
3180 IB_DEVICE_MEM_MGT_EXTENSIONS);
3181 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3182 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3183
3184 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3185 (!srp_dev->has_fmr || prefer_fr));
d1b4289e 3186
f5358a17
RD
3187 /*
3188 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3189 * minimum of 4096 bytes. We're unlikely to build large sglists
3190 * out of smaller entries.
f5358a17 3191 */
52ede08f
BVA
3192 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3193 srp_dev->mr_page_size = 1 << mr_page_shift;
3194 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3195 max_pages_per_mr = dev_attr->max_mr_size;
3196 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3197 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3198 max_pages_per_mr);
5cfb1782
BVA
3199 if (srp_dev->use_fast_reg) {
3200 srp_dev->max_pages_per_mr =
3201 min_t(u32, srp_dev->max_pages_per_mr,
3202 dev_attr->max_fast_reg_page_list_len);
3203 }
52ede08f
BVA
3204 srp_dev->mr_max_size = srp_dev->mr_page_size *
3205 srp_dev->max_pages_per_mr;
5cfb1782 3206 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3207 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3208 dev_attr->max_fast_reg_page_list_len,
52ede08f 3209 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
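/*
 * Example (hypothetical device limits): with a smallest supported page
 * size of 4 KiB, mr_page_shift = 12 and mr_page_size = 4096 bytes; the
 * pages-per-MR figure is then dev_attr->max_mr_size / 4096, clamped to
 * SRP_MAX_PAGES_PER_MR (and, with fast registration, to
 * max_fast_reg_page_list_len), giving mr_max_size = mr_page_size *
 * max_pages_per_mr bytes per memory registration.
 */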
f5358a17
RD
3210
3211 INIT_LIST_HEAD(&srp_dev->dev_list);
3212
3213 srp_dev->dev = device;
3214 srp_dev->pd = ib_alloc_pd(device);
3215 if (IS_ERR(srp_dev->pd))
3216 goto free_dev;
3217
3218 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3219 IB_ACCESS_LOCAL_WRITE |
3220 IB_ACCESS_REMOTE_READ |
3221 IB_ACCESS_REMOTE_WRITE);
3222 if (IS_ERR(srp_dev->mr))
3223 goto err_pd;
3224
07ebafba 3225 if (device->node_type == RDMA_NODE_IB_SWITCH) {
aef9ec39
RD
3226 s = 0;
3227 e = 0;
3228 } else {
3229 s = 1;
3230 e = device->phys_port_cnt;
3231 }
3232
3233 for (p = s; p <= e; ++p) {
f5358a17 3234 host = srp_add_port(srp_dev, p);
aef9ec39 3235 if (host)
f5358a17 3236 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3237 }
3238
f5358a17
RD
3239 ib_set_client_data(device, &srp_client, srp_dev);
3240
3241 goto free_attr;
3242
3243err_pd:
3244 ib_dealloc_pd(srp_dev->pd);
3245
3246free_dev:
3247 kfree(srp_dev);
3248
3249free_attr:
3250 kfree(dev_attr);
aef9ec39
RD
3251}
3252
3253static void srp_remove_one(struct ib_device *device)
3254{
f5358a17 3255 struct srp_device *srp_dev;
aef9ec39 3256 struct srp_host *host, *tmp_host;
ef6c49d8 3257 struct srp_target_port *target;
aef9ec39 3258
f5358a17 3259 srp_dev = ib_get_client_data(device, &srp_client);
1fe0cb84
DB
3260 if (!srp_dev)
3261 return;
aef9ec39 3262
f5358a17 3263 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3264 device_unregister(&host->dev);
aef9ec39
RD
3265 /*
3266 * Wait for the sysfs entry to go away, so that no new
3267 * target ports can be created.
3268 */
3269 wait_for_completion(&host->released);
3270
3271 /*
ef6c49d8 3272 * Remove all target ports.
aef9ec39 3273 */
b3589fd4 3274 spin_lock(&host->target_lock);
ef6c49d8
BVA
3275 list_for_each_entry(target, &host->target_list, list)
3276 srp_queue_remove_work(target);
b3589fd4 3277 spin_unlock(&host->target_lock);
aef9ec39
RD
3278
3279 /*
bcc05910 3280 * Wait for tl_err and target port removal tasks.
aef9ec39 3281 */
ef6c49d8 3282 flush_workqueue(system_long_wq);
bcc05910 3283 flush_workqueue(srp_remove_wq);
aef9ec39 3284
aef9ec39
RD
3285 kfree(host);
3286 }
3287
f5358a17
RD
3288 ib_dereg_mr(srp_dev->mr);
3289 ib_dealloc_pd(srp_dev->pd);
3290
3291 kfree(srp_dev);
aef9ec39
RD
3292}
3293
3236822b 3294static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3295 .has_rport_state = true,
3296 .reset_timer_if_blocked = true,
a95cadb9 3297 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3298 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3299 .dev_loss_tmo = &srp_dev_loss_tmo,
3300 .reconnect = srp_rport_reconnect,
dc1bdbd9 3301 .rport_delete = srp_rport_delete,
ed9b2264 3302 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3303};
3304
aef9ec39
RD
3305static int __init srp_init_module(void)
3306{
3307 int ret;
3308
dcb4cb85 3309 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3310
49248644 3311 if (srp_sg_tablesize) {
e0bda7d8 3312 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3313 if (!cmd_sg_entries)
3314 cmd_sg_entries = srp_sg_tablesize;
3315 }
3316
3317 if (!cmd_sg_entries)
3318 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3319
3320 if (cmd_sg_entries > 255) {
e0bda7d8 3321 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3322 cmd_sg_entries = 255;
1e89a194
DD
3323 }
3324
c07d424d
DD
3325 if (!indirect_sg_entries)
3326 indirect_sg_entries = cmd_sg_entries;
3327 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3328 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3329 cmd_sg_entries);
c07d424d
DD
3330 indirect_sg_entries = cmd_sg_entries;
3331 }
3332
bcc05910 3333 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3334 if (!srp_remove_wq) {
3335 ret = -ENOMEM;
bcc05910
BVA
3336 goto out;
3337 }
3338
3339 ret = -ENOMEM;
3236822b
FT
3340 ib_srp_transport_template =
3341 srp_attach_transport(&ib_srp_transport_functions);
3342 if (!ib_srp_transport_template)
bcc05910 3343 goto destroy_wq;
3236822b 3344
aef9ec39
RD
3345 ret = class_register(&srp_class);
3346 if (ret) {
e0bda7d8 3347 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3348 goto release_tr;
aef9ec39
RD
3349 }
3350
c1a0b23b
MT
3351 ib_sa_register_client(&srp_sa_client);
3352
aef9ec39
RD
3353 ret = ib_register_client(&srp_client);
3354 if (ret) {
e0bda7d8 3355 pr_err("couldn't register IB client\n");
bcc05910 3356 goto unreg_sa;
aef9ec39
RD
3357 }
3358
bcc05910
BVA
3359out:
3360 return ret;
3361
3362unreg_sa:
3363 ib_sa_unregister_client(&srp_sa_client);
3364 class_unregister(&srp_class);
3365
3366release_tr:
3367 srp_release_transport(ib_srp_transport_template);
3368
3369destroy_wq:
3370 destroy_workqueue(srp_remove_wq);
3371 goto out;
aef9ec39
RD
3372}
3373
3374static void __exit srp_cleanup_module(void)
3375{
3376 ib_unregister_client(&srp_client);
c1a0b23b 3377 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3378 class_unregister(&srp_class);
3236822b 3379 srp_release_transport(ib_srp_transport_template);
bcc05910 3380 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3381}
3382
3383module_init(srp_init_module);
3384module_exit(srp_cleanup_module);