git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/infiniband/ulp/srp/ib_srp.c
IB/srp: Remove !ch->target tests from the reconnect code
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
aef9ec39
RD
31 */
32
d236cd0e 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
e0bda7d8 34
aef9ec39
RD
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
de25968c 42#include <linux/jiffies.h>
56b5390c 43#include <rdma/ib_cache.h>
aef9ec39 44
60063497 45#include <linux/atomic.h>
aef9ec39
RD
46
47#include <scsi/scsi.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_dbg.h>
71444b97 50#include <scsi/scsi_tcq.h>
aef9ec39 51#include <scsi/srp.h>
3236822b 52#include <scsi/scsi_transport_srp.h>
aef9ec39 53
aef9ec39
RD
54#include "ib_srp.h"
55
56#define DRV_NAME "ib_srp"
57#define PFX DRV_NAME ": "
e8ca4135
VP
58#define DRV_VERSION "1.0"
59#define DRV_RELDATE "July 1, 2013"
aef9ec39
RD
60
61MODULE_AUTHOR("Roland Dreier");
33ab3e5b 62MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
aef9ec39 63MODULE_LICENSE("Dual BSD/GPL");
33ab3e5b
BVA
64MODULE_VERSION(DRV_VERSION);
65MODULE_INFO(release_date, DRV_RELDATE);
aef9ec39 66
49248644
DD
67static unsigned int srp_sg_tablesize;
68static unsigned int cmd_sg_entries;
c07d424d
DD
69static unsigned int indirect_sg_entries;
70static bool allow_ext_sg;
5cfb1782 71static bool prefer_fr;
b1b8854d 72static bool register_always;
49248644 73static int topspin_workarounds = 1;
74b0a15b 74
49248644
DD
75module_param(srp_sg_tablesize, uint, 0444);
76MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
74b0a15b 77
49248644
DD
78module_param(cmd_sg_entries, uint, 0444);
79MODULE_PARM_DESC(cmd_sg_entries,
80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
aef9ec39 81
c07d424d
DD
82module_param(indirect_sg_entries, uint, 0444);
83MODULE_PARM_DESC(indirect_sg_entries,
84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85
86module_param(allow_ext_sg, bool, 0444);
87MODULE_PARM_DESC(allow_ext_sg,
88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89
aef9ec39
RD
90module_param(topspin_workarounds, int, 0444);
91MODULE_PARM_DESC(topspin_workarounds,
92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93
5cfb1782
BVA
94module_param(prefer_fr, bool, 0444);
95MODULE_PARM_DESC(prefer_fr,
96"Whether to use fast registration if both FMR and fast registration are supported");
97
b1b8854d
BVA
98module_param(register_always, bool, 0444);
99MODULE_PARM_DESC(register_always,
100 "Use memory registration even for contiguous memory regions");
101
ed9b2264
BVA
102static struct kernel_param_ops srp_tmo_ops;
103
a95cadb9
BVA
104static int srp_reconnect_delay = 10;
105module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 S_IRUGO | S_IWUSR);
107MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108
ed9b2264
BVA
109static int srp_fast_io_fail_tmo = 15;
110module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(fast_io_fail_tmo,
113 "Number of seconds between the observation of a transport"
114 " layer error and failing all I/O. \"off\" means that this"
115 " functionality is disabled.");
116
a95cadb9 117static int srp_dev_loss_tmo = 600;
ed9b2264
BVA
118module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 S_IRUGO | S_IWUSR);
120MODULE_PARM_DESC(dev_loss_tmo,
121 "Maximum number of seconds that the SRP transport should"
122 " insulate transport layer errors. After this time has been"
123 " exceeded the SCSI host is removed. Should be"
124 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 " this functionality is disabled.");
127
d92c0da7
BVA
128static unsigned ch_count;
129module_param(ch_count, uint, 0444);
130MODULE_PARM_DESC(ch_count,
131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132
aef9ec39
RD
133static void srp_add_one(struct ib_device *device);
134static void srp_remove_one(struct ib_device *device);
509c07bc
BVA
135static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
136static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
aef9ec39
RD
137static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138
3236822b 139static struct scsi_transport_template *ib_srp_transport_template;
bcc05910 140static struct workqueue_struct *srp_remove_wq;
3236822b 141
aef9ec39
RD
142static struct ib_client srp_client = {
143 .name = "srp",
144 .add = srp_add_one,
145 .remove = srp_remove_one
146};
147
c1a0b23b
MT
148static struct ib_sa_client srp_sa_client;
149
ed9b2264
BVA
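/*
 * srp_tmo_get() / srp_tmo_set() - show and store handlers for the
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo module parameters.
 * A negative timeout is reported and accepted as "off".
 */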
150static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151{
152 int tmo = *(int *)kp->arg;
153
154 if (tmo >= 0)
155 return sprintf(buffer, "%d", tmo);
156 else
157 return sprintf(buffer, "off");
158}
159
160static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161{
162 int tmo, res;
163
164 if (strncmp(val, "off", 3) != 0) {
165 res = kstrtoint(val, 0, &tmo);
166 if (res)
167 goto out;
168 } else {
169 tmo = -1;
170 }
a95cadb9
BVA
171 if (kp->arg == &srp_reconnect_delay)
172 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
173 srp_dev_loss_tmo);
174 else if (kp->arg == &srp_fast_io_fail_tmo)
175 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
ed9b2264 176 else
a95cadb9
BVA
177 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
178 tmo);
ed9b2264
BVA
179 if (res)
180 goto out;
181 *(int *)kp->arg = tmo;
182
183out:
184 return res;
185}
186
187static struct kernel_param_ops srp_tmo_ops = {
188 .get = srp_tmo_get,
189 .set = srp_tmo_set,
190};
191
aef9ec39
RD
192static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
193{
194 return (struct srp_target_port *) host->hostdata;
195}
196
197static const char *srp_target_info(struct Scsi_Host *host)
198{
199 return host_to_target(host)->target_name;
200}
201
5d7cbfd6
RD
202static int srp_target_is_topspin(struct srp_target_port *target)
203{
204 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 205 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
206
207 return topspin_workarounds &&
3d1ff48d
RK
208 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
209 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
210}
211
aef9ec39
RD
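/*
 * Allocate an information unit (IU): a kernel buffer of @size bytes that is
 * DMA-mapped in @direction on the host's IB device.
 */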
212static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
213 gfp_t gfp_mask,
214 enum dma_data_direction direction)
215{
216 struct srp_iu *iu;
217
218 iu = kmalloc(sizeof *iu, gfp_mask);
219 if (!iu)
220 goto out;
221
222 iu->buf = kzalloc(size, gfp_mask);
223 if (!iu->buf)
224 goto out_free_iu;
225
05321937
GKH
226 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
227 direction);
228 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
229 goto out_free_buf;
230
231 iu->size = size;
232 iu->direction = direction;
233
234 return iu;
235
236out_free_buf:
237 kfree(iu->buf);
238out_free_iu:
239 kfree(iu);
240out:
241 return NULL;
242}
243
244static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
245{
246 if (!iu)
247 return;
248
05321937
GKH
249 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
250 iu->direction);
aef9ec39
RD
251 kfree(iu->buf);
252 kfree(iu);
253}
254
255static void srp_qp_event(struct ib_event *event, void *context)
256{
e0bda7d8 257 pr_debug("QP event %d\n", event->event);
aef9ec39
RD
258}
259
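/* Move a freshly created QP to the INIT state with the proper P_Key and port. */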
260static int srp_init_qp(struct srp_target_port *target,
261 struct ib_qp *qp)
262{
263 struct ib_qp_attr *attr;
264 int ret;
265
266 attr = kmalloc(sizeof *attr, GFP_KERNEL);
267 if (!attr)
268 return -ENOMEM;
269
56b5390c
BVA
270 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
271 target->srp_host->port,
272 be16_to_cpu(target->pkey),
273 &attr->pkey_index);
aef9ec39
RD
274 if (ret)
275 goto out;
276
277 attr->qp_state = IB_QPS_INIT;
278 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
279 IB_ACCESS_REMOTE_WRITE);
280 attr->port_num = target->srp_host->port;
281
282 ret = ib_modify_qp(qp, attr,
283 IB_QP_STATE |
284 IB_QP_PKEY_INDEX |
285 IB_QP_ACCESS_FLAGS |
286 IB_QP_PORT);
287
288out:
289 kfree(attr);
290 return ret;
291}
292
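/*
 * Replace the channel's IB CM ID with a new one and (re)initialize the path
 * record fields from the target port information.
 */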
509c07bc 293static int srp_new_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 294{
509c07bc 295 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
296 struct ib_cm_id *new_cm_id;
297
05321937 298 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
509c07bc 299 srp_cm_handler, ch);
9fe4bcf4
DD
300 if (IS_ERR(new_cm_id))
301 return PTR_ERR(new_cm_id);
302
509c07bc
BVA
303 if (ch->cm_id)
304 ib_destroy_cm_id(ch->cm_id);
305 ch->cm_id = new_cm_id;
306 ch->path.sgid = target->sgid;
307 ch->path.dgid = target->orig_dgid;
308 ch->path.pkey = target->pkey;
309 ch->path.service_id = target->service_id;
9fe4bcf4
DD
310
311 return 0;
312}
313
d1b4289e
BVA
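/* Create an FMR pool whose size matches the SCSI host's queue depth. */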
314static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
315{
316 struct srp_device *dev = target->srp_host->srp_dev;
317 struct ib_fmr_pool_param fmr_param;
318
319 memset(&fmr_param, 0, sizeof(fmr_param));
320 fmr_param.pool_size = target->scsi_host->can_queue;
321 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
322 fmr_param.cache = 1;
52ede08f
BVA
323 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
324 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
325 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
326 IB_ACCESS_REMOTE_WRITE |
327 IB_ACCESS_REMOTE_READ);
328
329 return ib_create_fmr_pool(dev->pd, &fmr_param);
330}
331
5cfb1782
BVA
332/**
333 * srp_destroy_fr_pool() - free the resources owned by a pool
334 * @pool: Fast registration pool to be destroyed.
335 */
336static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
337{
338 int i;
339 struct srp_fr_desc *d;
340
341 if (!pool)
342 return;
343
344 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
345 if (d->frpl)
346 ib_free_fast_reg_page_list(d->frpl);
347 if (d->mr)
348 ib_dereg_mr(d->mr);
349 }
350 kfree(pool);
351}
352
353/**
354 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
355 * @device: IB device to allocate fast registration descriptors for.
356 * @pd: Protection domain associated with the FR descriptors.
357 * @pool_size: Number of descriptors to allocate.
358 * @max_page_list_len: Maximum fast registration work request page list length.
359 */
360static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
361 struct ib_pd *pd, int pool_size,
362 int max_page_list_len)
363{
364 struct srp_fr_pool *pool;
365 struct srp_fr_desc *d;
366 struct ib_mr *mr;
367 struct ib_fast_reg_page_list *frpl;
368 int i, ret = -EINVAL;
369
370 if (pool_size <= 0)
371 goto err;
372 ret = -ENOMEM;
373 pool = kzalloc(sizeof(struct srp_fr_pool) +
374 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
375 if (!pool)
376 goto err;
377 pool->size = pool_size;
378 pool->max_page_list_len = max_page_list_len;
379 spin_lock_init(&pool->lock);
380 INIT_LIST_HEAD(&pool->free_list);
381
382 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
383 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
384 if (IS_ERR(mr)) {
385 ret = PTR_ERR(mr);
386 goto destroy_pool;
387 }
388 d->mr = mr;
389 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
390 if (IS_ERR(frpl)) {
391 ret = PTR_ERR(frpl);
392 goto destroy_pool;
393 }
394 d->frpl = frpl;
395 list_add_tail(&d->entry, &pool->free_list);
396 }
397
398out:
399 return pool;
400
401destroy_pool:
402 srp_destroy_fr_pool(pool);
403
404err:
405 pool = ERR_PTR(ret);
406 goto out;
407}
408
409/**
410 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
411 * @pool: Pool to obtain descriptor from.
412 */
413static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
414{
415 struct srp_fr_desc *d = NULL;
416 unsigned long flags;
417
418 spin_lock_irqsave(&pool->lock, flags);
419 if (!list_empty(&pool->free_list)) {
420 d = list_first_entry(&pool->free_list, typeof(*d), entry);
421 list_del(&d->entry);
422 }
423 spin_unlock_irqrestore(&pool->lock, flags);
424
425 return d;
426}
427
428/**
429 * srp_fr_pool_put() - put an FR descriptor back in the free list
430 * @pool: Pool the descriptor was allocated from.
431 * @desc: Pointer to an array of fast registration descriptor pointers.
432 * @n: Number of descriptors to put back.
433 *
434 * Note: The caller must already have queued an invalidation request for
435 * desc->mr->rkey before calling this function.
436 */
437static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
438 int n)
439{
440 unsigned long flags;
441 int i;
442
443 spin_lock_irqsave(&pool->lock, flags);
444 for (i = 0; i < n; i++)
445 list_add(&desc[i]->entry, &pool->free_list);
446 spin_unlock_irqrestore(&pool->lock, flags);
447}
448
449static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
450{
451 struct srp_device *dev = target->srp_host->srp_dev;
452
453 return srp_create_fr_pool(dev->dev, dev->pd,
454 target->scsi_host->can_queue,
455 dev->max_pages_per_mr);
456}
457
7dad6b2e
BVA
458/**
459 * srp_destroy_qp() - destroy an RDMA queue pair
460 * @ch: SRP RDMA channel.
461 *
462 * Change a queue pair into the error state and wait until all receive
463 * completions have been processed before destroying it. This avoids that
464 * the receive completion handler can access the queue pair while it is
465 * being destroyed.
466 */
467static void srp_destroy_qp(struct srp_rdma_ch *ch)
468{
7dad6b2e
BVA
469 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
470 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
471 struct ib_recv_wr *bad_wr;
472 int ret;
473
474 /* Destroying a QP and reusing ch->done is only safe if not connected */
c014c8cd 475 WARN_ON_ONCE(ch->connected);
7dad6b2e
BVA
476
477 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
478 WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
479 if (ret)
480 goto out;
481
482 init_completion(&ch->done);
483 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
484 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
485 if (ret == 0)
486 wait_for_completion(&ch->done);
487
488out:
489 ib_destroy_qp(ch->qp);
490}
491
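/*
 * Create the receive and send completion queues, the QP and the FMR or FR
 * pool for an RDMA channel, replacing any previously allocated resources.
 */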
509c07bc 492static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 493{
509c07bc 494 struct srp_target_port *target = ch->target;
62154b2e 495 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 496 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
497 struct ib_cq *recv_cq, *send_cq;
498 struct ib_qp *qp;
d1b4289e 499 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782
BVA
500 struct srp_fr_pool *fr_pool = NULL;
501 const int m = 1 + dev->use_fast_reg;
aef9ec39
RD
502 int ret;
503
504 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
505 if (!init_attr)
506 return -ENOMEM;
507
7dad6b2e 508 /* + 1 for SRP_LAST_WR_ID */
509c07bc 509 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
7dad6b2e 510 target->queue_size + 1, ch->comp_vector);
73aa89ed
IR
511 if (IS_ERR(recv_cq)) {
512 ret = PTR_ERR(recv_cq);
da9d2f07 513 goto err;
aef9ec39
RD
514 }
515
509c07bc
BVA
516 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
517 m * target->queue_size, ch->comp_vector);
73aa89ed
IR
518 if (IS_ERR(send_cq)) {
519 ret = PTR_ERR(send_cq);
da9d2f07 520 goto err_recv_cq;
9c03dc9f
BVA
521 }
522
73aa89ed 523 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
524
525 init_attr->event_handler = srp_qp_event;
5cfb1782 526 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 527 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
528 init_attr->cap.max_recv_sge = 1;
529 init_attr->cap.max_send_sge = 1;
5cfb1782 530 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 531 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
532 init_attr->send_cq = send_cq;
533 init_attr->recv_cq = recv_cq;
aef9ec39 534
62154b2e 535 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
536 if (IS_ERR(qp)) {
537 ret = PTR_ERR(qp);
da9d2f07 538 goto err_send_cq;
aef9ec39
RD
539 }
540
73aa89ed 541 ret = srp_init_qp(target, qp);
da9d2f07
RD
542 if (ret)
543 goto err_qp;
aef9ec39 544
5cfb1782
BVA
545 if (dev->use_fast_reg && dev->has_fr) {
546 fr_pool = srp_alloc_fr_pool(target);
547 if (IS_ERR(fr_pool)) {
548 ret = PTR_ERR(fr_pool);
549 shost_printk(KERN_WARNING, target->scsi_host, PFX
550 "FR pool allocation failed (%d)\n", ret);
551 goto err_qp;
552 }
509c07bc
BVA
553 if (ch->fr_pool)
554 srp_destroy_fr_pool(ch->fr_pool);
555 ch->fr_pool = fr_pool;
5cfb1782 556 } else if (!dev->use_fast_reg && dev->has_fmr) {
d1b4289e
BVA
557 fmr_pool = srp_alloc_fmr_pool(target);
558 if (IS_ERR(fmr_pool)) {
559 ret = PTR_ERR(fmr_pool);
560 shost_printk(KERN_WARNING, target->scsi_host, PFX
561 "FMR pool allocation failed (%d)\n", ret);
562 goto err_qp;
563 }
509c07bc
BVA
564 if (ch->fmr_pool)
565 ib_destroy_fmr_pool(ch->fmr_pool);
566 ch->fmr_pool = fmr_pool;
d1b4289e
BVA
567 }
568
509c07bc 569 if (ch->qp)
7dad6b2e 570 srp_destroy_qp(ch);
509c07bc
BVA
571 if (ch->recv_cq)
572 ib_destroy_cq(ch->recv_cq);
573 if (ch->send_cq)
574 ib_destroy_cq(ch->send_cq);
73aa89ed 575
509c07bc
BVA
576 ch->qp = qp;
577 ch->recv_cq = recv_cq;
578 ch->send_cq = send_cq;
73aa89ed 579
da9d2f07
RD
580 kfree(init_attr);
581 return 0;
582
583err_qp:
73aa89ed 584 ib_destroy_qp(qp);
da9d2f07
RD
585
586err_send_cq:
73aa89ed 587 ib_destroy_cq(send_cq);
da9d2f07
RD
588
589err_recv_cq:
73aa89ed 590 ib_destroy_cq(recv_cq);
da9d2f07
RD
591
592err:
aef9ec39
RD
593 kfree(init_attr);
594 return ret;
595}
596
4d73f95f
BVA
597/*
598 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 599 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 600 */
509c07bc
BVA
601static void srp_free_ch_ib(struct srp_target_port *target,
602 struct srp_rdma_ch *ch)
aef9ec39 603{
5cfb1782 604 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
605 int i;
606
d92c0da7
BVA
607 if (!ch->target)
608 return;
609
509c07bc
BVA
610 if (ch->cm_id) {
611 ib_destroy_cm_id(ch->cm_id);
612 ch->cm_id = NULL;
394c595e
BVA
613 }
614
d92c0da7
BVA
615 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
616 if (!ch->qp)
617 return;
618
5cfb1782 619 if (dev->use_fast_reg) {
509c07bc
BVA
620 if (ch->fr_pool)
621 srp_destroy_fr_pool(ch->fr_pool);
5cfb1782 622 } else {
509c07bc
BVA
623 if (ch->fmr_pool)
624 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 625 }
7dad6b2e 626 srp_destroy_qp(ch);
509c07bc
BVA
627 ib_destroy_cq(ch->send_cq);
628 ib_destroy_cq(ch->recv_cq);
aef9ec39 629
d92c0da7
BVA
630 /*
631 * Avoid having the SCSI error handler use this channel after it has
632 * been freed. The SCSI error handler can keep trying to perform
633 * recovery actions after scsi_remove_host() has returned.
634 */
636 ch->target = NULL;
637
509c07bc
BVA
638 ch->qp = NULL;
639 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 640
509c07bc 641 if (ch->rx_ring) {
4d73f95f 642 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
643 srp_free_iu(target->srp_host, ch->rx_ring[i]);
644 kfree(ch->rx_ring);
645 ch->rx_ring = NULL;
4d73f95f 646 }
509c07bc 647 if (ch->tx_ring) {
4d73f95f 648 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
649 srp_free_iu(target->srp_host, ch->tx_ring[i]);
650 kfree(ch->tx_ring);
651 ch->tx_ring = NULL;
4d73f95f 652 }
aef9ec39
RD
653}
654
655static void srp_path_rec_completion(int status,
656 struct ib_sa_path_rec *pathrec,
509c07bc 657 void *ch_ptr)
aef9ec39 658{
509c07bc
BVA
659 struct srp_rdma_ch *ch = ch_ptr;
660 struct srp_target_port *target = ch->target;
aef9ec39 661
509c07bc 662 ch->status = status;
aef9ec39 663 if (status)
7aa54bd7
DD
664 shost_printk(KERN_ERR, target->scsi_host,
665 PFX "Got failed path rec status %d\n", status);
aef9ec39 666 else
509c07bc
BVA
667 ch->path = *pathrec;
668 complete(&ch->done);
aef9ec39
RD
669}
670
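/* Issue an SA path record query for the channel and wait for it to complete. */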
509c07bc 671static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 672{
509c07bc 673 struct srp_target_port *target = ch->target;
a702adce
BVA
674 int ret;
675
509c07bc
BVA
676 ch->path.numb_path = 1;
677
678 init_completion(&ch->done);
679
680 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
681 target->srp_host->srp_dev->dev,
682 target->srp_host->port,
683 &ch->path,
684 IB_SA_PATH_REC_SERVICE_ID |
685 IB_SA_PATH_REC_DGID |
686 IB_SA_PATH_REC_SGID |
687 IB_SA_PATH_REC_NUMB_PATH |
688 IB_SA_PATH_REC_PKEY,
689 SRP_PATH_REC_TIMEOUT_MS,
690 GFP_KERNEL,
691 srp_path_rec_completion,
692 ch, &ch->path_query);
693 if (ch->path_query_id < 0)
694 return ch->path_query_id;
695
696 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
697 if (ret < 0)
698 return ret;
aef9ec39 699
509c07bc 700 if (ch->status < 0)
7aa54bd7
DD
701 shost_printk(KERN_WARNING, target->scsi_host,
702 PFX "Path record query failed\n");
aef9ec39 703
509c07bc 704 return ch->status;
aef9ec39
RD
705}
706
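/* Build an SRP_LOGIN_REQ for the channel and send it as an IB CM REQ. */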
d92c0da7 707static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 708{
509c07bc 709 struct srp_target_port *target = ch->target;
aef9ec39
RD
710 struct {
711 struct ib_cm_req_param param;
712 struct srp_login_req priv;
713 } *req = NULL;
714 int status;
715
716 req = kzalloc(sizeof *req, GFP_KERNEL);
717 if (!req)
718 return -ENOMEM;
719
509c07bc 720 req->param.primary_path = &ch->path;
aef9ec39
RD
721 req->param.alternate_path = NULL;
722 req->param.service_id = target->service_id;
509c07bc
BVA
723 req->param.qp_num = ch->qp->qp_num;
724 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
725 req->param.private_data = &req->priv;
726 req->param.private_data_len = sizeof req->priv;
727 req->param.flow_control = 1;
728
729 get_random_bytes(&req->param.starting_psn, 4);
730 req->param.starting_psn &= 0xffffff;
731
732 /*
733 * Pick some arbitrary defaults here; we could make these
734 * module parameters if anyone cared about setting them.
735 */
736 req->param.responder_resources = 4;
737 req->param.remote_cm_response_timeout = 20;
738 req->param.local_cm_response_timeout = 20;
7bb312e4 739 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
740 req->param.rnr_retry_count = 7;
741 req->param.max_cm_retries = 15;
742
743 req->priv.opcode = SRP_LOGIN_REQ;
744 req->priv.tag = 0;
49248644 745 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
746 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
747 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
748 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
749 SRP_MULTICHAN_SINGLE);
0c0450db 750 /*
3cd96564 751 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
752 * port identifier format is 8 bytes of ID extension followed
753 * by 8 bytes of GUID. Older drafts put the two halves in the
754 * opposite order, so that the GUID comes first.
755 *
756 * Targets conforming to these obsolete drafts can be
757 * recognized by the I/O Class they report.
758 */
759 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
760 memcpy(req->priv.initiator_port_id,
747fe000 761 &target->sgid.global.interface_id, 8);
0c0450db 762 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 763 &target->initiator_ext, 8);
0c0450db
R
764 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
765 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
766 } else {
767 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
768 &target->initiator_ext, 8);
769 memcpy(req->priv.initiator_port_id + 8,
747fe000 770 &target->sgid.global.interface_id, 8);
0c0450db
R
771 memcpy(req->priv.target_port_id, &target->id_ext, 8);
772 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
773 }
774
aef9ec39
RD
775 /*
776 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
777 * zero out the first 8 bytes of our initiator port ID and set
778 * the second 8 bytes to the local node GUID.
aef9ec39 779 */
5d7cbfd6 780 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
781 shost_printk(KERN_DEBUG, target->scsi_host,
782 PFX "Topspin/Cisco initiator port ID workaround "
783 "activated for target GUID %016llx\n",
45c37cad 784 be64_to_cpu(target->ioc_guid));
aef9ec39 785 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 786 memcpy(req->priv.initiator_port_id + 8,
05321937 787 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 788 }
aef9ec39 789
509c07bc 790 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
791
792 kfree(req);
793
794 return status;
795}
796
ef6c49d8
BVA
797static bool srp_queue_remove_work(struct srp_target_port *target)
798{
799 bool changed = false;
800
801 spin_lock_irq(&target->lock);
802 if (target->state != SRP_TARGET_REMOVED) {
803 target->state = SRP_TARGET_REMOVED;
804 changed = true;
805 }
806 spin_unlock_irq(&target->lock);
807
808 if (changed)
bcc05910 809 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
810
811 return changed;
812}
813
aef9ec39
RD
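/* Mark every channel as disconnected and send a CM DREQ on each of them. */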
814static void srp_disconnect_target(struct srp_target_port *target)
815{
d92c0da7
BVA
816 struct srp_rdma_ch *ch;
817 int i;
509c07bc 818
c014c8cd 819 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 820
c014c8cd
BVA
821 for (i = 0; i < target->ch_count; i++) {
822 ch = &target->ch[i];
823 ch->connected = false;
824 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
825 shost_printk(KERN_DEBUG, target->scsi_host,
826 PFX "Sending CM DREQ failed\n");
294c875a 827 }
e6581056 828 }
aef9ec39
RD
829}
830
509c07bc
BVA
831static void srp_free_req_data(struct srp_target_port *target,
832 struct srp_rdma_ch *ch)
8f26c9ff 833{
5cfb1782
BVA
834 struct srp_device *dev = target->srp_host->srp_dev;
835 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
836 struct srp_request *req;
837 int i;
838
47513cf4 839 if (!ch->req_ring)
4d73f95f
BVA
840 return;
841
842 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 843 req = &ch->req_ring[i];
5cfb1782
BVA
844 if (dev->use_fast_reg)
845 kfree(req->fr_list);
846 else
847 kfree(req->fmr_list);
8f26c9ff 848 kfree(req->map_page);
c07d424d
DD
849 if (req->indirect_dma_addr) {
850 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
851 target->indirect_size,
852 DMA_TO_DEVICE);
853 }
854 kfree(req->indirect_desc);
8f26c9ff 855 }
4d73f95f 856
509c07bc
BVA
857 kfree(ch->req_ring);
858 ch->req_ring = NULL;
8f26c9ff
DD
859}
860
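/*
 * Allocate the request ring of a channel, including the FMR/FR descriptor
 * lists, page arrays and DMA-mapped indirect descriptor buffers.
 */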
509c07bc 861static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 862{
509c07bc 863 struct srp_target_port *target = ch->target;
b81d00bd
BVA
864 struct srp_device *srp_dev = target->srp_host->srp_dev;
865 struct ib_device *ibdev = srp_dev->dev;
866 struct srp_request *req;
5cfb1782 867 void *mr_list;
b81d00bd
BVA
868 dma_addr_t dma_addr;
869 int i, ret = -ENOMEM;
870
509c07bc
BVA
871 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
872 GFP_KERNEL);
873 if (!ch->req_ring)
4d73f95f
BVA
874 goto out;
875
876 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 877 req = &ch->req_ring[i];
5cfb1782
BVA
878 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
879 GFP_KERNEL);
880 if (!mr_list)
881 goto out;
882 if (srp_dev->use_fast_reg)
883 req->fr_list = mr_list;
884 else
885 req->fmr_list = mr_list;
52ede08f 886 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
d1b4289e 887 sizeof(void *), GFP_KERNEL);
5cfb1782
BVA
888 if (!req->map_page)
889 goto out;
b81d00bd 890 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 891 if (!req->indirect_desc)
b81d00bd
BVA
892 goto out;
893
894 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
895 target->indirect_size,
896 DMA_TO_DEVICE);
897 if (ib_dma_mapping_error(ibdev, dma_addr))
898 goto out;
899
900 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
901 }
902 ret = 0;
903
904out:
905 return ret;
906}
907
683b159a
BVA
908/**
909 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
910 * @shost: SCSI host whose attributes to remove from sysfs.
911 *
912 * Note: Any attributes defined in the host template that did not exist
913 * before this function was invoked will be ignored.
914 */
915static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
916{
917 struct device_attribute **attr;
918
919 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
920 device_remove_file(&shost->shost_dev, *attr);
921}
922
ee12d6a8
BVA
923static void srp_remove_target(struct srp_target_port *target)
924{
d92c0da7
BVA
925 struct srp_rdma_ch *ch;
926 int i;
509c07bc 927
ef6c49d8
BVA
928 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
929
ee12d6a8 930 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 931 srp_rport_get(target->rport);
ee12d6a8
BVA
932 srp_remove_host(target->scsi_host);
933 scsi_remove_host(target->scsi_host);
93079162 934 srp_stop_rport_timers(target->rport);
ef6c49d8 935 srp_disconnect_target(target);
d92c0da7
BVA
936 for (i = 0; i < target->ch_count; i++) {
937 ch = &target->ch[i];
938 srp_free_ch_ib(target, ch);
939 }
c1120f89 940 cancel_work_sync(&target->tl_err_work);
9dd69a60 941 srp_rport_put(target->rport);
d92c0da7
BVA
942 for (i = 0; i < target->ch_count; i++) {
943 ch = &target->ch[i];
944 srp_free_req_data(target, ch);
945 }
946 kfree(target->ch);
947 target->ch = NULL;
65d7dd2f
VP
948
949 spin_lock(&target->srp_host->target_lock);
950 list_del(&target->list);
951 spin_unlock(&target->srp_host->target_lock);
952
ee12d6a8
BVA
953 scsi_host_put(target->scsi_host);
954}
955
c4028958 956static void srp_remove_work(struct work_struct *work)
aef9ec39 957{
c4028958 958 struct srp_target_port *target =
ef6c49d8 959 container_of(work, struct srp_target_port, remove_work);
aef9ec39 960
ef6c49d8 961 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 962
96fc248a 963 srp_remove_target(target);
aef9ec39
RD
964}
965
dc1bdbd9
BVA
966static void srp_rport_delete(struct srp_rport *rport)
967{
968 struct srp_target_port *target = rport->lld_data;
969
970 srp_queue_remove_work(target);
971}
972
c014c8cd
BVA
973/**
974 * srp_connected_ch() - number of connected channels
975 * @target: SRP target port.
976 */
977static int srp_connected_ch(struct srp_target_port *target)
978{
979 int i, c = 0;
980
981 for (i = 0; i < target->ch_count; i++)
982 c += target->ch[i].connected;
983
984 return c;
985}
986
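/*
 * Connect one RDMA channel: look up the path, send the SRP login request and
 * retry as needed when the target responds with a port or DLID redirect.
 */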
d92c0da7 987static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 988{
509c07bc 989 struct srp_target_port *target = ch->target;
aef9ec39
RD
990 int ret;
991
c014c8cd 992 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 993
509c07bc 994 ret = srp_lookup_path(ch);
aef9ec39
RD
995 if (ret)
996 return ret;
997
998 while (1) {
509c07bc 999 init_completion(&ch->done);
d92c0da7 1000 ret = srp_send_req(ch, multich);
aef9ec39
RD
1001 if (ret)
1002 return ret;
509c07bc 1003 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
1004 if (ret < 0)
1005 return ret;
aef9ec39
RD
1006
1007 /*
1008 * The CM event handling code will set status to
1009 * SRP_PORT_REDIRECT if we get a port redirect REJ
1010 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1011 * redirect REJ back.
1012 */
509c07bc 1013 switch (ch->status) {
aef9ec39 1014 case 0:
c014c8cd 1015 ch->connected = true;
aef9ec39
RD
1016 return 0;
1017
1018 case SRP_PORT_REDIRECT:
509c07bc 1019 ret = srp_lookup_path(ch);
aef9ec39
RD
1020 if (ret)
1021 return ret;
1022 break;
1023
1024 case SRP_DLID_REDIRECT:
1025 break;
1026
9fe4bcf4 1027 case SRP_STALE_CONN:
9fe4bcf4 1028 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1029 "giving up on stale connection\n");
509c07bc
BVA
1030 ch->status = -ECONNRESET;
1031 return ch->status;
9fe4bcf4 1032
aef9ec39 1033 default:
509c07bc 1034 return ch->status;
aef9ec39
RD
1035 }
1036 }
1037}
1038
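/* Post a local invalidate work request for @rkey on the channel's QP. */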
509c07bc 1039static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
1040{
1041 struct ib_send_wr *bad_wr;
1042 struct ib_send_wr wr = {
1043 .opcode = IB_WR_LOCAL_INV,
1044 .wr_id = LOCAL_INV_WR_ID_MASK,
1045 .next = NULL,
1046 .num_sge = 0,
1047 .send_flags = 0,
1048 .ex.invalidate_rkey = rkey,
1049 };
1050
509c07bc 1051 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1052}
1053
d945e1df 1054static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1055 struct srp_rdma_ch *ch,
d945e1df
RD
1056 struct srp_request *req)
1057{
509c07bc 1058 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1059 struct srp_device *dev = target->srp_host->srp_dev;
1060 struct ib_device *ibdev = dev->dev;
1061 int i, res;
8f26c9ff 1062
bb350d1d 1063 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1064 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1065 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1066 return;
1067
5cfb1782
BVA
1068 if (dev->use_fast_reg) {
1069 struct srp_fr_desc **pfr;
1070
1071 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1072 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1073 if (res < 0) {
1074 shost_printk(KERN_ERR, target->scsi_host, PFX
1075 "Queueing INV WR for rkey %#x failed (%d)\n",
1076 (*pfr)->mr->rkey, res);
1077 queue_work(system_long_wq,
1078 &target->tl_err_work);
1079 }
1080 }
1081 if (req->nmdesc)
509c07bc 1082 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782
BVA
1083 req->nmdesc);
1084 } else {
1085 struct ib_pool_fmr **pfmr;
1086
1087 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1088 ib_fmr_pool_unmap(*pfmr);
1089 }
f5358a17 1090
8f26c9ff
DD
1091 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1092 scmnd->sc_data_direction);
d945e1df
RD
1093}
1094
22032991
BVA
1095/**
1096 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1097 * @ch: SRP RDMA channel.
22032991 1098 * @req: SRP request.
b3fe628d 1099 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1100 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1101 * ownership of @req->scmnd if it equals @scmnd.
1102 *
1103 * Return value:
1104 * Either NULL or a pointer to the SCSI command the caller became owner of.
1105 */
509c07bc 1106static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1107 struct srp_request *req,
b3fe628d 1108 struct scsi_device *sdev,
22032991
BVA
1109 struct scsi_cmnd *scmnd)
1110{
1111 unsigned long flags;
1112
509c07bc 1113 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1114 if (req->scmnd &&
1115 (!sdev || req->scmnd->device == sdev) &&
1116 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1117 scmnd = req->scmnd;
1118 req->scmnd = NULL;
22032991
BVA
1119 } else {
1120 scmnd = NULL;
1121 }
509c07bc 1122 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1123
1124 return scmnd;
1125}
1126
1127/**
1128 * srp_free_req() - Unmap data and add request to the free request list.
509c07bc 1129 * @ch: SRP RDMA channel.
af24663b
BVA
1130 * @req: Request to be freed.
1131 * @scmnd: SCSI command associated with @req.
1132 * @req_lim_delta: Amount to be added to @ch->req_lim.
22032991 1133 */
509c07bc
BVA
1134static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1135 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1136{
94a9174c
BVA
1137 unsigned long flags;
1138
509c07bc 1139 srp_unmap_data(scmnd, ch, req);
22032991 1140
509c07bc
BVA
1141 spin_lock_irqsave(&ch->lock, flags);
1142 ch->req_lim += req_lim_delta;
509c07bc 1143 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1144}
1145
509c07bc
BVA
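/* If @req still owns a SCSI command, complete that command with @result. */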
1146static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1147 struct scsi_device *sdev, int result)
526b4caa 1148{
509c07bc 1149 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1150
1151 if (scmnd) {
509c07bc 1152 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1153 scmnd->result = result;
22032991 1154 scmnd->scsi_done(scmnd);
22032991 1155 }
526b4caa
IR
1156}
1157
ed9b2264 1158static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1159{
ed9b2264 1160 struct srp_target_port *target = rport->lld_data;
d92c0da7 1161 struct srp_rdma_ch *ch;
b3fe628d
BVA
1162 struct Scsi_Host *shost = target->scsi_host;
1163 struct scsi_device *sdev;
d92c0da7 1164 int i, j;
ed9b2264 1165
b3fe628d
BVA
1166 /*
1167 * Invoking srp_terminate_io() while srp_queuecommand() is running
1168 * is not safe. Hence the warning statement below.
1169 */
1170 shost_for_each_device(sdev, shost)
1171 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1172
d92c0da7
BVA
1173 for (i = 0; i < target->ch_count; i++) {
1174 ch = &target->ch[i];
509c07bc 1175
d92c0da7
BVA
1176 for (j = 0; j < target->req_ring_size; ++j) {
1177 struct srp_request *req = &ch->req_ring[j];
1178
1179 srp_finish_req(ch, req, NULL,
1180 DID_TRANSPORT_FAILFAST << 16);
1181 }
ed9b2264
BVA
1182 }
1183}
aef9ec39 1184
ed9b2264
BVA
1185/*
1186 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1187 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1188 * srp_reset_device() or srp_reset_host() calls will occur while this function
1189 * is in progress. One way to realize that is not to call this function
1190 * directly but to call srp_reconnect_rport() instead since that last function
1191 * serializes calls of this function via rport->mutex and also blocks
1192 * srp_queuecommand() calls before invoking this function.
1193 */
1194static int srp_rport_reconnect(struct srp_rport *rport)
1195{
1196 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1197 struct srp_rdma_ch *ch;
1198 int i, j, ret = 0;
1199 bool multich = false;
09be70a2 1200
aef9ec39 1201 srp_disconnect_target(target);
34aa654e
BVA
1202
1203 if (target->state == SRP_TARGET_SCANNING)
1204 return -ENODEV;
1205
aef9ec39 1206 /*
c7c4e7ff
BVA
1207 * Now get a new local CM ID so that we avoid confusing the target in
1208 * case things are really fouled up. Doing so also ensures that all CM
1209 * callbacks will have finished before a new QP is allocated.
aef9ec39 1210 */
d92c0da7
BVA
1211 for (i = 0; i < target->ch_count; i++) {
1212 ch = &target->ch[i];
d92c0da7 1213 ret += srp_new_cm_id(ch);
536ae14e 1214 }
d92c0da7
BVA
1215 for (i = 0; i < target->ch_count; i++) {
1216 ch = &target->ch[i];
d92c0da7
BVA
1217 for (j = 0; j < target->req_ring_size; ++j) {
1218 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1219
d92c0da7
BVA
1220 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1221 }
1222 }
1223 for (i = 0; i < target->ch_count; i++) {
1224 ch = &target->ch[i];
d92c0da7
BVA
1225 /*
1226 * Whether or not creating a new CM ID succeeded, create a new
1227 * QP. This guarantees that all completion callback function
1228 * invocations have finished before request resetting starts.
1229 */
1230 ret += srp_create_ch_ib(ch);
aef9ec39 1231
d92c0da7
BVA
1232 INIT_LIST_HEAD(&ch->free_tx);
1233 for (j = 0; j < target->queue_size; ++j)
1234 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1235 }
8de9fe3a
BVA
1236
1237 target->qp_in_error = false;
1238
d92c0da7
BVA
1239 for (i = 0; i < target->ch_count; i++) {
1240 ch = &target->ch[i];
bbac5ccf 1241 if (ret)
d92c0da7 1242 break;
d92c0da7
BVA
1243 ret = srp_connect_ch(ch, multich);
1244 multich = true;
1245 }
09be70a2 1246
ed9b2264
BVA
1247 if (ret == 0)
1248 shost_printk(KERN_INFO, target->scsi_host,
1249 PFX "reconnect succeeded\n");
aef9ec39
RD
1250
1251 return ret;
1252}
1253
8f26c9ff
DD
1254static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1255 unsigned int dma_len, u32 rkey)
f5358a17 1256{
8f26c9ff 1257 struct srp_direct_buf *desc = state->desc;
f5358a17 1258
8f26c9ff
DD
1259 desc->va = cpu_to_be64(dma_addr);
1260 desc->key = cpu_to_be32(rkey);
1261 desc->len = cpu_to_be32(dma_len);
f5358a17 1262
8f26c9ff
DD
1263 state->total_len += dma_len;
1264 state->desc++;
1265 state->ndesc++;
1266}
559ce8f1 1267
8f26c9ff 1268static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1269 struct srp_rdma_ch *ch)
8f26c9ff 1270{
8f26c9ff
DD
1271 struct ib_pool_fmr *fmr;
1272 u64 io_addr = 0;
85507bcc 1273
509c07bc 1274 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1275 state->npages, io_addr);
1276 if (IS_ERR(fmr))
1277 return PTR_ERR(fmr);
f5358a17 1278
8f26c9ff 1279 *state->next_fmr++ = fmr;
52ede08f 1280 state->nmdesc++;
f5358a17 1281
52ede08f 1282 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
539dde6f 1283
8f26c9ff
DD
1284 return 0;
1285}
1286
5cfb1782 1287static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1288 struct srp_rdma_ch *ch)
5cfb1782 1289{
509c07bc 1290 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1291 struct srp_device *dev = target->srp_host->srp_dev;
1292 struct ib_send_wr *bad_wr;
1293 struct ib_send_wr wr;
1294 struct srp_fr_desc *desc;
1295 u32 rkey;
1296
509c07bc 1297 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1298 if (!desc)
1299 return -ENOMEM;
1300
1301 rkey = ib_inc_rkey(desc->mr->rkey);
1302 ib_update_fast_reg_key(desc->mr, rkey);
1303
1304 memcpy(desc->frpl->page_list, state->pages,
1305 sizeof(state->pages[0]) * state->npages);
1306
1307 memset(&wr, 0, sizeof(wr));
1308 wr.opcode = IB_WR_FAST_REG_MR;
1309 wr.wr_id = FAST_REG_WR_ID_MASK;
1310 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1311 wr.wr.fast_reg.page_list = desc->frpl;
1312 wr.wr.fast_reg.page_list_len = state->npages;
1313 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1314 wr.wr.fast_reg.length = state->dma_len;
1315 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1316 IB_ACCESS_REMOTE_READ |
1317 IB_ACCESS_REMOTE_WRITE);
1318 wr.wr.fast_reg.rkey = desc->mr->lkey;
1319
1320 *state->next_fr++ = desc;
1321 state->nmdesc++;
1322
1323 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1324 desc->mr->rkey);
1325
509c07bc 1326 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1327}
1328
539dde6f 1329static int srp_finish_mapping(struct srp_map_state *state,
509c07bc 1330 struct srp_rdma_ch *ch)
539dde6f 1331{
509c07bc 1332 struct srp_target_port *target = ch->target;
539dde6f
BVA
1333 int ret = 0;
1334
1335 if (state->npages == 0)
1336 return 0;
1337
b1b8854d 1338 if (state->npages == 1 && !register_always)
52ede08f 1339 srp_map_desc(state, state->base_dma_addr, state->dma_len,
539dde6f
BVA
1340 target->rkey);
1341 else
5cfb1782 1342 ret = target->srp_host->srp_dev->use_fast_reg ?
509c07bc
BVA
1343 srp_map_finish_fr(state, ch) :
1344 srp_map_finish_fmr(state, ch);
539dde6f
BVA
1345
1346 if (ret == 0) {
1347 state->npages = 0;
52ede08f 1348 state->dma_len = 0;
539dde6f
BVA
1349 }
1350
1351 return ret;
1352}
1353
8f26c9ff
DD
1354static void srp_map_update_start(struct srp_map_state *state,
1355 struct scatterlist *sg, int sg_index,
1356 dma_addr_t dma_addr)
1357{
1358 state->unmapped_sg = sg;
1359 state->unmapped_index = sg_index;
1360 state->unmapped_addr = dma_addr;
1361}
85507bcc 1362
8f26c9ff 1363static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1364 struct srp_rdma_ch *ch,
8f26c9ff 1365 struct scatterlist *sg, int sg_index,
5cfb1782 1366 bool use_mr)
8f26c9ff 1367{
509c07bc 1368 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1369 struct srp_device *dev = target->srp_host->srp_dev;
1370 struct ib_device *ibdev = dev->dev;
1371 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1372 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1373 unsigned int len;
1374 int ret;
1375
1376 if (!dma_len)
1377 return 0;
1378
5cfb1782
BVA
1379 if (!use_mr) {
1380 /*
1381 * Once we're in direct map mode for a request, we don't
1382 * go back to FMR or FR mode, so no need to update anything
8f26c9ff
DD
1383 * other than the descriptor.
1384 */
1385 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1386 return 0;
85507bcc 1387 }
f5358a17 1388
5cfb1782
BVA
1389 /*
1390 * Since not all RDMA HW drivers support non-zero page offsets for
1391 * FMR, if we start at an offset into a page, don't merge into the
1392 * current FMR mapping. Finish it out, and use the kernel's MR for
1393 * this sg entry.
8f26c9ff 1394 */
5cfb1782
BVA
1395 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1396 dma_len > dev->mr_max_size) {
509c07bc 1397 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1398 if (ret)
1399 return ret;
1400
1401 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1402 srp_map_update_start(state, NULL, 0, 0);
1403 return 0;
f5358a17
RD
1404 }
1405
5cfb1782
BVA
1406 /*
1407 * If this is the first sg that will be mapped via FMR or via FR, save
1408 * our position. We need to know the first unmapped entry, its index,
1409 * and the first unmapped address within that entry to be able to
1410 * restart mapping after an error.
8f26c9ff
DD
1411 */
1412 if (!state->unmapped_sg)
1413 srp_map_update_start(state, sg, sg_index, dma_addr);
f5358a17 1414
8f26c9ff 1415 while (dma_len) {
5cfb1782
BVA
1416 unsigned offset = dma_addr & ~dev->mr_page_mask;
1417 if (state->npages == dev->max_pages_per_mr || offset != 0) {
509c07bc 1418 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1419 if (ret)
1420 return ret;
f5358a17 1421
8f26c9ff
DD
1422 srp_map_update_start(state, sg, sg_index, dma_addr);
1423 }
1424
5cfb1782 1425 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1426
8f26c9ff
DD
1427 if (!state->npages)
1428 state->base_dma_addr = dma_addr;
5cfb1782 1429 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1430 state->dma_len += len;
8f26c9ff
DD
1431 dma_addr += len;
1432 dma_len -= len;
1433 }
1434
5cfb1782
BVA
1435 /*
1436 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff
DD
1437 * close it out and start a new one -- we can only merge at page
1438 * boundaries.
1439 */
1440 ret = 0;
52ede08f 1441 if (len != dev->mr_page_size) {
509c07bc 1442 ret = srp_finish_mapping(state, ch);
8f26c9ff
DD
1443 if (!ret)
1444 srp_map_update_start(state, NULL, 0, 0);
1445 }
f5358a17
RD
1446 return ret;
1447}
1448
509c07bc
BVA
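/*
 * Map the scatterlist of a request via FMR or FR; if memory registration
 * fails, back up to the first unmapped entry and fall back to direct
 * descriptors.
 */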
1449static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1450 struct srp_request *req, struct scatterlist *scat,
1451 int count)
76bc1e1d 1452{
509c07bc 1453 struct srp_target_port *target = ch->target;
76bc1e1d
BVA
1454 struct srp_device *dev = target->srp_host->srp_dev;
1455 struct ib_device *ibdev = dev->dev;
1456 struct scatterlist *sg;
5cfb1782
BVA
1457 int i;
1458 bool use_mr;
76bc1e1d
BVA
1459
1460 state->desc = req->indirect_desc;
1461 state->pages = req->map_page;
5cfb1782
BVA
1462 if (dev->use_fast_reg) {
1463 state->next_fr = req->fr_list;
509c07bc 1464 use_mr = !!ch->fr_pool;
5cfb1782
BVA
1465 } else {
1466 state->next_fmr = req->fmr_list;
509c07bc 1467 use_mr = !!ch->fmr_pool;
5cfb1782 1468 }
76bc1e1d
BVA
1469
1470 for_each_sg(scat, sg, count, i) {
509c07bc 1471 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
5cfb1782
BVA
1472 /*
1473 * Memory registration failed, so backtrack to the
1474 * first unmapped entry and continue on without using
1475 * memory registration.
76bc1e1d
BVA
1476 */
1477 dma_addr_t dma_addr;
1478 unsigned int dma_len;
1479
1480backtrack:
1481 sg = state->unmapped_sg;
1482 i = state->unmapped_index;
1483
1484 dma_addr = ib_sg_dma_address(ibdev, sg);
1485 dma_len = ib_sg_dma_len(ibdev, sg);
1486 dma_len -= (state->unmapped_addr - dma_addr);
1487 dma_addr = state->unmapped_addr;
5cfb1782 1488 use_mr = false;
76bc1e1d
BVA
1489 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1490 }
1491 }
1492
509c07bc 1493 if (use_mr && srp_finish_mapping(state, ch))
76bc1e1d
BVA
1494 goto backtrack;
1495
52ede08f 1496 req->nmdesc = state->nmdesc;
5cfb1782
BVA
1497
1498 return 0;
76bc1e1d
BVA
1499}
1500
509c07bc 1501static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1502 struct srp_request *req)
1503{
509c07bc 1504 struct srp_target_port *target = ch->target;
76bc1e1d 1505 struct scatterlist *scat;
aef9ec39 1506 struct srp_cmd *cmd = req->cmd->buf;
76bc1e1d 1507 int len, nents, count;
85507bcc
RC
1508 struct srp_device *dev;
1509 struct ib_device *ibdev;
8f26c9ff
DD
1510 struct srp_map_state state;
1511 struct srp_indirect_buf *indirect_hdr;
8f26c9ff
DD
1512 u32 table_len;
1513 u8 fmt;
aef9ec39 1514
bb350d1d 1515 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1516 return sizeof (struct srp_cmd);
1517
1518 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1519 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1520 shost_printk(KERN_WARNING, target->scsi_host,
1521 PFX "Unhandled data direction %d\n",
1522 scmnd->sc_data_direction);
aef9ec39
RD
1523 return -EINVAL;
1524 }
1525
bb350d1d
FT
1526 nents = scsi_sg_count(scmnd);
1527 scat = scsi_sglist(scmnd);
aef9ec39 1528
05321937 1529 dev = target->srp_host->srp_dev;
85507bcc
RC
1530 ibdev = dev->dev;
1531
1532 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1533 if (unlikely(count == 0))
1534 return -EIO;
f5358a17
RD
1535
1536 fmt = SRP_DATA_DESC_DIRECT;
1537 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1538
b1b8854d 1539 if (count == 1 && !register_always) {
f5358a17
RD
1540 /*
1541 * The midlayer only generated a single gather/scatter
1542 * entry, or DMA mapping coalesced everything to a
1543 * single entry. So a direct descriptor along with
1544 * the DMA MR suffices.
1545 */
cf368713 1546 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1547
85507bcc 1548 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
9af76271 1549 buf->key = cpu_to_be32(target->rkey);
85507bcc 1550 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1551
52ede08f 1552 req->nmdesc = 0;
8f26c9ff
DD
1553 goto map_complete;
1554 }
1555
5cfb1782
BVA
1556 /*
1557 * We have more than one scatter/gather entry, so build our indirect
1558 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1559 */
1560 indirect_hdr = (void *) cmd->add_data;
1561
c07d424d
DD
1562 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1563 target->indirect_size, DMA_TO_DEVICE);
1564
8f26c9ff 1565 memset(&state, 0, sizeof(state));
509c07bc 1566 srp_map_sg(&state, ch, req, scat, count);
cf368713 1567
c07d424d
DD
1568 /* We've mapped the request, now pull as much of the indirect
1569 * descriptor table as we can into the command buffer. If this
1570 * target is not using an external indirect table, we are
1571 * guaranteed to fit into the command, as the SCSI layer won't
1572 * give us more S/G entries than we allow.
8f26c9ff 1573 */
8f26c9ff 1574 if (state.ndesc == 1) {
5cfb1782
BVA
1575 /*
1576 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1577 * so use a direct descriptor.
1578 */
1579 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1580
c07d424d 1581 *buf = req->indirect_desc[0];
8f26c9ff 1582 goto map_complete;
aef9ec39
RD
1583 }
1584
c07d424d
DD
1585 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1586 !target->allow_ext_sg)) {
1587 shost_printk(KERN_ERR, target->scsi_host,
1588 "Could not fit S/G list into SRP_CMD\n");
1589 return -EIO;
1590 }
1591
1592 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff
DD
1593 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1594
1595 fmt = SRP_DATA_DESC_INDIRECT;
1596 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1597 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1598
c07d424d
DD
1599 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1600 count * sizeof (struct srp_direct_buf));
8f26c9ff 1601
c07d424d 1602 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
8f26c9ff
DD
1603 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1604 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1605 indirect_hdr->len = cpu_to_be32(state.total_len);
1606
1607 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1608 cmd->data_out_desc_cnt = count;
8f26c9ff 1609 else
c07d424d
DD
1610 cmd->data_in_desc_cnt = count;
1611
1612 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1613 DMA_TO_DEVICE);
8f26c9ff
DD
1614
1615map_complete:
aef9ec39
RD
1616 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1617 cmd->buf_fmt = fmt << 4;
1618 else
1619 cmd->buf_fmt = fmt;
1620
aef9ec39
RD
1621 return len;
1622}
1623
76c75b25
BVA
1624/*
1625 * Return an IU and possibly a credit to the free pool
1626 */
509c07bc 1627static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1628 enum srp_iu_type iu_type)
1629{
1630 unsigned long flags;
1631
509c07bc
BVA
1632 spin_lock_irqsave(&ch->lock, flags);
1633 list_add(&iu->list, &ch->free_tx);
76c75b25 1634 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1635 ++ch->req_lim;
1636 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1637}
1638
05a1d750 1639/*
509c07bc 1640 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1641 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1642 *
1643 * Note:
1644 * An upper limit for the number of allocated information units for each
1645 * request type is:
1646 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1647 * more than Scsi_Host.can_queue requests.
1648 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1649 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1650 * one unanswered SRP request to an initiator.
1651 */
509c07bc 1652static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1653 enum srp_iu_type iu_type)
1654{
509c07bc 1655 struct srp_target_port *target = ch->target;
05a1d750
DD
1656 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1657 struct srp_iu *iu;
1658
509c07bc 1659 srp_send_completion(ch->send_cq, ch);
05a1d750 1660
509c07bc 1661 if (list_empty(&ch->free_tx))
05a1d750
DD
1662 return NULL;
1663
1664 /* Initiator responses to target requests do not consume credits */
76c75b25 1665 if (iu_type != SRP_IU_RSP) {
509c07bc 1666 if (ch->req_lim <= rsv) {
76c75b25
BVA
1667 ++target->zero_req_lim;
1668 return NULL;
1669 }
1670
509c07bc 1671 --ch->req_lim;
05a1d750
DD
1672 }
1673
509c07bc 1674 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1675 list_del(&iu->list);
05a1d750
DD
1676 return iu;
1677}
1678
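/* Post a signaled send work request for @iu on the channel's QP. */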
509c07bc 1679static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1680{
509c07bc 1681 struct srp_target_port *target = ch->target;
05a1d750
DD
1682 struct ib_sge list;
1683 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1684
1685 list.addr = iu->dma;
1686 list.length = len;
9af76271 1687 list.lkey = target->lkey;
05a1d750
DD
1688
1689 wr.next = NULL;
dcb4cb85 1690 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1691 wr.sg_list = &list;
1692 wr.num_sge = 1;
1693 wr.opcode = IB_WR_SEND;
1694 wr.send_flags = IB_SEND_SIGNALED;
1695
509c07bc 1696 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1697}
1698
509c07bc 1699static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1700{
509c07bc 1701 struct srp_target_port *target = ch->target;
c996bb47 1702 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1703 struct ib_sge list;
c996bb47
BVA
1704
1705 list.addr = iu->dma;
1706 list.length = iu->size;
9af76271 1707 list.lkey = target->lkey;
c996bb47
BVA
1708
1709 wr.next = NULL;
dcb4cb85 1710 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1711 wr.sg_list = &list;
1712 wr.num_sge = 1;
1713
509c07bc 1714 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1715}
1716
509c07bc 1717static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1718{
509c07bc 1719 struct srp_target_port *target = ch->target;
aef9ec39
RD
1720 struct srp_request *req;
1721 struct scsi_cmnd *scmnd;
1722 unsigned long flags;
aef9ec39 1723
aef9ec39 1724 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1725 spin_lock_irqsave(&ch->lock, flags);
1726 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1727 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1728
509c07bc 1729 ch->tsk_mgmt_status = -1;
f8b6e31e 1730 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1731 ch->tsk_mgmt_status = rsp->data[3];
1732 complete(&ch->tsk_mgmt_done);
aef9ec39 1733 } else {
77f2c1a4
BVA
1734 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1735 if (scmnd) {
1736 req = (void *)scmnd->host_scribble;
1737 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1738 }
22032991 1739 if (!scmnd) {
7aa54bd7 1740 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1741 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1742 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1743
509c07bc
BVA
1744 spin_lock_irqsave(&ch->lock, flags);
1745 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1746 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1747
1748 return;
1749 }
aef9ec39
RD
1750 scmnd->result = rsp->status;
1751
1752 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1753 memcpy(scmnd->sense_buffer, rsp->data +
1754 be32_to_cpu(rsp->resp_data_len),
1755 min_t(int, be32_to_cpu(rsp->sense_data_len),
1756 SCSI_SENSE_BUFFERSIZE));
1757 }
1758
e714531a 1759 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1760 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1761 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1762 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1763 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1764 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1765 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1766 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1767
509c07bc 1768 srp_free_req(ch, req, scmnd,
22032991
BVA
1769 be32_to_cpu(rsp->req_lim_delta));
1770
f8b6e31e
DD
1771 scmnd->host_scribble = NULL;
1772 scmnd->scsi_done(scmnd);
aef9ec39 1773 }
aef9ec39
RD
1774}
1775
509c07bc 1776static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1777 void *rsp, int len)
1778{
509c07bc 1779 struct srp_target_port *target = ch->target;
76c75b25 1780 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1781 unsigned long flags;
1782 struct srp_iu *iu;
76c75b25 1783 int err;
bb12588a 1784
509c07bc
BVA
1785 spin_lock_irqsave(&ch->lock, flags);
1786 ch->req_lim += req_delta;
1787 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1788 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1789
bb12588a
DD
1790 if (!iu) {
1791 shost_printk(KERN_ERR, target->scsi_host, PFX
1792 "no IU available to send response\n");
76c75b25 1793 return 1;
bb12588a
DD
1794 }
1795
1796 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1797 memcpy(iu->buf, rsp, len);
1798 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1799
509c07bc 1800 err = srp_post_send(ch, iu, len);
76c75b25 1801 if (err) {
bb12588a
DD
1802 shost_printk(KERN_ERR, target->scsi_host, PFX
1803 "unable to post response: %d\n", err);
509c07bc 1804 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1805 }
bb12588a 1806
bb12588a
DD
1807 return err;
1808}
1809
509c07bc 1810static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1811 struct srp_cred_req *req)
1812{
1813 struct srp_cred_rsp rsp = {
1814 .opcode = SRP_CRED_RSP,
1815 .tag = req->tag,
1816 };
1817 s32 delta = be32_to_cpu(req->req_lim_delta);
1818
509c07bc
BVA
1819 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1820 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1821 "problems processing SRP_CRED_REQ\n");
1822}
1823
509c07bc 1824static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1825 struct srp_aer_req *req)
1826{
509c07bc 1827 struct srp_target_port *target = ch->target;
bb12588a
DD
1828 struct srp_aer_rsp rsp = {
1829 .opcode = SRP_AER_RSP,
1830 .tag = req->tag,
1831 };
1832 s32 delta = be32_to_cpu(req->req_lim_delta);
1833
1834 shost_printk(KERN_ERR, target->scsi_host, PFX
1835 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1836
509c07bc 1837 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1838 shost_printk(KERN_ERR, target->scsi_host, PFX
1839 "problems processing SRP_AER_REQ\n");
1840}
1841
509c07bc 1842static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1843{
509c07bc 1844 struct srp_target_port *target = ch->target;
dcb4cb85 1845 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1846 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1847 int res;
aef9ec39
RD
1848 u8 opcode;
1849
509c07bc 1850 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1851 DMA_FROM_DEVICE);
aef9ec39
RD
1852
1853 opcode = *(u8 *) iu->buf;
1854
1855 if (0) {
7aa54bd7
DD
1856 shost_printk(KERN_ERR, target->scsi_host,
1857 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1858 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1859 iu->buf, wc->byte_len, true);
aef9ec39
RD
1860 }
1861
1862 switch (opcode) {
1863 case SRP_RSP:
509c07bc 1864 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1865 break;
1866
bb12588a 1867 case SRP_CRED_REQ:
509c07bc 1868 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1869 break;
1870
1871 case SRP_AER_REQ:
509c07bc 1872 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1873 break;
1874
aef9ec39
RD
1875 case SRP_T_LOGOUT:
1876 /* XXX Handle target logout */
7aa54bd7
DD
1877 shost_printk(KERN_WARNING, target->scsi_host,
1878 PFX "Got target logout request\n");
aef9ec39
RD
1879 break;
1880
1881 default:
7aa54bd7
DD
1882 shost_printk(KERN_WARNING, target->scsi_host,
1883 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1884 break;
1885 }
1886
509c07bc 1887 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1888 DMA_FROM_DEVICE);
c996bb47 1889
509c07bc 1890 res = srp_post_recv(ch, iu);
c996bb47
BVA
1891 if (res != 0)
1892 shost_printk(KERN_ERR, target->scsi_host,
1893 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1894}
1895
c1120f89
BVA
1896/**
1897 * srp_tl_err_work() - handle a transport layer error
af24663b 1898 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1899 *
1900 * Note: This function may get invoked before the rport has been created,
1901 * hence the target->rport test.
1902 */
1903static void srp_tl_err_work(struct work_struct *work)
1904{
1905 struct srp_target_port *target;
1906
1907 target = container_of(work, struct srp_target_port, tl_err_work);
1908 if (target->rport)
1909 srp_start_tl_fail_timers(target->rport);
1910}
1911
5cfb1782 1912static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
7dad6b2e 1913 bool send_err, struct srp_rdma_ch *ch)
948d1e88 1914{
7dad6b2e
BVA
1915 struct srp_target_port *target = ch->target;
1916
1917 if (wr_id == SRP_LAST_WR_ID) {
1918 complete(&ch->done);
1919 return;
1920 }
1921
c014c8cd 1922 if (ch->connected && !target->qp_in_error) {
5cfb1782
BVA
1923 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1924 shost_printk(KERN_ERR, target->scsi_host, PFX
1925 "LOCAL_INV failed with status %d\n",
1926 wc_status);
1927 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1928 shost_printk(KERN_ERR, target->scsi_host, PFX
1929 "FAST_REG_MR failed status %d\n",
1930 wc_status);
1931 } else {
1932 shost_printk(KERN_ERR, target->scsi_host,
1933 PFX "failed %s status %d for iu %p\n",
1934 send_err ? "send" : "receive",
1935 wc_status, (void *)(uintptr_t)wr_id);
1936 }
c1120f89 1937 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 1938 }
948d1e88
BVA
1939 target->qp_in_error = true;
1940}
1941
509c07bc 1942static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 1943{
509c07bc 1944 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 1945 struct ib_wc wc;
aef9ec39
RD
1946
1947 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1948 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 1949 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 1950 srp_handle_recv(ch, &wc);
948d1e88 1951 } else {
7dad6b2e 1952 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
aef9ec39 1953 }
9c03dc9f
BVA
1954 }
1955}
1956
509c07bc 1957static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 1958{
509c07bc 1959 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 1960 struct ib_wc wc;
dcb4cb85 1961 struct srp_iu *iu;
9c03dc9f
BVA
1962
1963 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
1964 if (likely(wc.status == IB_WC_SUCCESS)) {
1965 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 1966 list_add(&iu->list, &ch->free_tx);
948d1e88 1967 } else {
7dad6b2e 1968 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
9c03dc9f 1969 }
aef9ec39
RD
1970 }
1971}
1972
76c75b25 1973static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 1974{
76c75b25 1975 struct srp_target_port *target = host_to_target(shost);
a95cadb9 1976 struct srp_rport *rport = target->rport;
509c07bc 1977 struct srp_rdma_ch *ch;
aef9ec39
RD
1978 struct srp_request *req;
1979 struct srp_iu *iu;
1980 struct srp_cmd *cmd;
85507bcc 1981 struct ib_device *dev;
76c75b25 1982 unsigned long flags;
77f2c1a4
BVA
1983 u32 tag;
1984 u16 idx;
d1b4289e 1985 int len, ret;
a95cadb9
BVA
1986 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1987
1988 /*
1989 * The SCSI EH thread is the only context from which srp_queuecommand()
1990 * can get invoked for blocked devices (SDEV_BLOCK /
1991 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1992 * locking the rport mutex if invoked from inside the SCSI EH.
1993 */
1994 if (in_scsi_eh)
1995 mutex_lock(&rport->mutex);
aef9ec39 1996
d1b4289e
BVA
1997 scmnd->result = srp_chkready(target->rport);
1998 if (unlikely(scmnd->result))
1999 goto err;
2ce19e72 2000
77f2c1a4
BVA
2001 WARN_ON_ONCE(scmnd->request->tag < 0);
2002 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2003 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2004 idx = blk_mq_unique_tag_to_tag(tag);
2005 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2006 dev_name(&shost->shost_gendev), tag, idx,
2007 target->req_ring_size);
509c07bc
BVA
2008
2009 spin_lock_irqsave(&ch->lock, flags);
2010 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2011 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2012
77f2c1a4
BVA
2013 if (!iu)
2014 goto err;
2015
2016 req = &ch->req_ring[idx];
05321937 2017 dev = target->srp_host->srp_dev->dev;
49248644 2018 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2019 DMA_TO_DEVICE);
aef9ec39 2020
f8b6e31e 2021 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2022
2023 cmd = iu->buf;
2024 memset(cmd, 0, sizeof *cmd);
2025
2026 cmd->opcode = SRP_CMD;
2027 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
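	/*
	 * The shift above builds the 8-byte SCSI LUN structure: for LUN
	 * numbers below 256 the LUN lands in byte 1 of the big-endian field
	 * (peripheral device addressing), e.g. LUN 5 is sent as
	 * 0x0005000000000000.
	 */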
77f2c1a4 2028 cmd->tag = tag;
aef9ec39
RD
2029 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2030
aef9ec39
RD
2031 req->scmnd = scmnd;
2032 req->cmd = iu;
aef9ec39 2033
509c07bc 2034 len = srp_map_data(scmnd, ch, req);
aef9ec39 2035 if (len < 0) {
7aa54bd7 2036 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2037 PFX "Failed to map data (%d)\n", len);
2038 /*
2039 * If we ran out of memory descriptors (-ENOMEM) because an
2040 * application is queuing many requests with more than
52ede08f 2041 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2042 * to reduce queue depth temporarily.
2043 */
2044 scmnd->result = len == -ENOMEM ?
2045 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2046 goto err_iu;
aef9ec39
RD
2047 }
2048
49248644 2049 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2050 DMA_TO_DEVICE);
aef9ec39 2051
509c07bc 2052 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2053 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2054 goto err_unmap;
2055 }
2056
d1b4289e
BVA
2057 ret = 0;
2058
a95cadb9
BVA
2059unlock_rport:
2060 if (in_scsi_eh)
2061 mutex_unlock(&rport->mutex);
2062
d1b4289e 2063 return ret;
aef9ec39
RD
2064
2065err_unmap:
509c07bc 2066 srp_unmap_data(scmnd, ch, req);
aef9ec39 2067
76c75b25 2068err_iu:
509c07bc 2069 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2070
024ca901
BVA
2071 /*
2072 * Ensure that the loops that iterate over the request ring never
2073 * encounter a dangling SCSI command pointer.
2074 */
2075 req->scmnd = NULL;
2076
d1b4289e
BVA
2077err:
2078 if (scmnd->result) {
2079 scmnd->scsi_done(scmnd);
2080 ret = 0;
2081 } else {
2082 ret = SCSI_MLQUEUE_HOST_BUSY;
2083 }
a95cadb9 2084
d1b4289e 2085 goto unlock_rport;
aef9ec39
RD
2086}
2087
4d73f95f
BVA
2088/*
2089 * Note: the resources allocated in this function are freed in
509c07bc 2090 * srp_free_ch_ib().
4d73f95f 2091 */
509c07bc 2092static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2093{
509c07bc 2094 struct srp_target_port *target = ch->target;
aef9ec39
RD
2095 int i;
2096
509c07bc
BVA
2097 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2098 GFP_KERNEL);
2099 if (!ch->rx_ring)
4d73f95f 2100 goto err_no_ring;
509c07bc
BVA
2101 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2102 GFP_KERNEL);
2103 if (!ch->tx_ring)
4d73f95f
BVA
2104 goto err_no_ring;
2105
2106 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2107 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2108 ch->max_ti_iu_len,
2109 GFP_KERNEL, DMA_FROM_DEVICE);
2110 if (!ch->rx_ring[i])
aef9ec39
RD
2111 goto err;
2112 }
2113
4d73f95f 2114 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2115 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2116 target->max_iu_len,
2117 GFP_KERNEL, DMA_TO_DEVICE);
2118 if (!ch->tx_ring[i])
aef9ec39 2119 goto err;
dcb4cb85 2120
509c07bc 2121 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2122 }
2123
2124 return 0;
2125
2126err:
4d73f95f 2127 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2128 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2129 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2130 }
2131
4d73f95f
BVA
2132
2133err_no_ring:
509c07bc
BVA
2134 kfree(ch->tx_ring);
2135 ch->tx_ring = NULL;
2136 kfree(ch->rx_ring);
2137 ch->rx_ring = NULL;
4d73f95f 2138
aef9ec39
RD
2139 return -ENOMEM;
2140}
2141
c9b03c1a
BVA
2142static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2143{
2144 uint64_t T_tr_ns, max_compl_time_ms;
2145 uint32_t rq_tmo_jiffies;
2146
2147 /*
2148 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2149 * table 91), both the QP timeout and the retry count have to be set
2150 * for RC QP's during the RTR to RTS transition.
2151 */
2152 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2153 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2154
2155 /*
2156 * Set target->rq_tmo_jiffies to one second more than the largest time
2157 * it can take before an error completion is generated. See also
2158 * C9-140..142 in the IBTA spec for more information about how to
2159 * convert the QP Local ACK Timeout value to nanoseconds.
2160 */
2161 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2162 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2163 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2164 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2165
2166 return rq_tmo_jiffies;
2167}
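/*
 * Worked example with illustrative values: a local ACK timeout of 19 and
 * retry_cnt == 7 give T_tr = 4096 * 2^19 ns = 2^31 ns, roughly 2.15 s, so
 * max_compl_time is about 7 * 4 * 2.15 s ~= 60 s and rq_tmo_jiffies ends
 * up corresponding to roughly 61 seconds.
 */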
2168
961e0be8
DD
2169static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2170 struct srp_login_rsp *lrsp,
509c07bc 2171 struct srp_rdma_ch *ch)
961e0be8 2172{
509c07bc 2173 struct srp_target_port *target = ch->target;
961e0be8
DD
2174 struct ib_qp_attr *qp_attr = NULL;
2175 int attr_mask = 0;
2176 int ret;
2177 int i;
2178
2179 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2180 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2181 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2182
2183 /*
2184 * Reserve credits for task management so we don't
2185 * bounce requests back to the SCSI mid-layer.
2186 */
2187 target->scsi_host->can_queue
509c07bc 2188 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2189 target->scsi_host->can_queue);
4d73f95f
BVA
2190 target->scsi_host->cmd_per_lun
2191 = min_t(int, target->scsi_host->can_queue,
2192 target->scsi_host->cmd_per_lun);
961e0be8
DD
2193 } else {
2194 shost_printk(KERN_WARNING, target->scsi_host,
2195 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2196 ret = -ECONNRESET;
2197 goto error;
2198 }
2199
509c07bc
BVA
2200 if (!ch->rx_ring) {
2201 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2202 if (ret)
2203 goto error;
2204 }
2205
2206 ret = -ENOMEM;
2207 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2208 if (!qp_attr)
2209 goto error;
2210
2211 qp_attr->qp_state = IB_QPS_RTR;
2212 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2213 if (ret)
2214 goto error_free;
2215
509c07bc 2216 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2217 if (ret)
2218 goto error_free;
2219
4d73f95f 2220 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2221 struct srp_iu *iu = ch->rx_ring[i];
2222
2223 ret = srp_post_recv(ch, iu);
961e0be8
DD
2224 if (ret)
2225 goto error_free;
2226 }
2227
2228 qp_attr->qp_state = IB_QPS_RTS;
2229 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2230 if (ret)
2231 goto error_free;
2232
c9b03c1a
BVA
2233 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2234
509c07bc 2235 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2236 if (ret)
2237 goto error_free;
2238
2239 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2240
2241error_free:
2242 kfree(qp_attr);
2243
2244error:
509c07bc 2245 ch->status = ret;
961e0be8
DD
2246}
2247
aef9ec39
RD
2248static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2249 struct ib_cm_event *event,
509c07bc 2250 struct srp_rdma_ch *ch)
aef9ec39 2251{
509c07bc 2252 struct srp_target_port *target = ch->target;
7aa54bd7 2253 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2254 struct ib_class_port_info *cpi;
2255 int opcode;
2256
2257 switch (event->param.rej_rcvd.reason) {
2258 case IB_CM_REJ_PORT_CM_REDIRECT:
2259 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2260 ch->path.dlid = cpi->redirect_lid;
2261 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2262 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2263 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2264
509c07bc 2265 ch->status = ch->path.dlid ?
aef9ec39
RD
2266 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2267 break;
2268
2269 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2270 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2271 /*
2272 * Topspin/Cisco SRP gateways incorrectly send
2273 * reject reason code 25 when they mean 24
2274 * (port redirect).
2275 */
509c07bc 2276 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2277 event->param.rej_rcvd.ari, 16);
2278
7aa54bd7
DD
2279 shost_printk(KERN_DEBUG, shost,
2280 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2281 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2282 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2283
509c07bc 2284 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2285 } else {
7aa54bd7
DD
2286 shost_printk(KERN_WARNING, shost,
2287 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2288 ch->status = -ECONNRESET;
aef9ec39
RD
2289 }
2290 break;
2291
2292 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2293 shost_printk(KERN_WARNING, shost,
2294 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2295 ch->status = -ECONNRESET;
aef9ec39
RD
2296 break;
2297
2298 case IB_CM_REJ_CONSUMER_DEFINED:
2299 opcode = *(u8 *) event->private_data;
2300 if (opcode == SRP_LOGIN_REJ) {
2301 struct srp_login_rej *rej = event->private_data;
2302 u32 reason = be32_to_cpu(rej->reason);
2303
2304 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2305 shost_printk(KERN_WARNING, shost,
2306 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2307 else
e7ffde01
BVA
2308 shost_printk(KERN_WARNING, shost, PFX
2309 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2310 target->sgid.raw,
2311 target->orig_dgid.raw, reason);
aef9ec39 2312 } else
7aa54bd7
DD
2313 shost_printk(KERN_WARNING, shost,
2314 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2315 " opcode 0x%02x\n", opcode);
509c07bc 2316 ch->status = -ECONNRESET;
aef9ec39
RD
2317 break;
2318
9fe4bcf4
DD
2319 case IB_CM_REJ_STALE_CONN:
2320 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2321 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2322 break;
2323
aef9ec39 2324 default:
7aa54bd7
DD
2325 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2326 event->param.rej_rcvd.reason);
509c07bc 2327 ch->status = -ECONNRESET;
aef9ec39
RD
2328 }
2329}
2330
2331static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2332{
509c07bc
BVA
2333 struct srp_rdma_ch *ch = cm_id->context;
2334 struct srp_target_port *target = ch->target;
aef9ec39 2335 int comp = 0;
aef9ec39
RD
2336
2337 switch (event->event) {
2338 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2339 shost_printk(KERN_DEBUG, target->scsi_host,
2340 PFX "Sending CM REQ failed\n");
aef9ec39 2341 comp = 1;
509c07bc 2342 ch->status = -ECONNRESET;
aef9ec39
RD
2343 break;
2344
2345 case IB_CM_REP_RECEIVED:
2346 comp = 1;
509c07bc 2347 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2348 break;
2349
2350 case IB_CM_REJ_RECEIVED:
7aa54bd7 2351 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2352 comp = 1;
2353
509c07bc 2354 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2355 break;
2356
b7ac4ab4 2357 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2358 shost_printk(KERN_WARNING, target->scsi_host,
2359 PFX "DREQ received - connection closed\n");
c014c8cd 2360 ch->connected = false;
b7ac4ab4 2361 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2362 shost_printk(KERN_ERR, target->scsi_host,
2363 PFX "Sending CM DREP failed\n");
c1120f89 2364 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2365 break;
2366
2367 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2368 shost_printk(KERN_ERR, target->scsi_host,
2369 PFX "connection closed\n");
ac72d766 2370 comp = 1;
aef9ec39 2371
509c07bc 2372 ch->status = 0;
aef9ec39
RD
2373 break;
2374
b7ac4ab4
IR
2375 case IB_CM_MRA_RECEIVED:
2376 case IB_CM_DREQ_ERROR:
2377 case IB_CM_DREP_RECEIVED:
2378 break;
2379
aef9ec39 2380 default:
7aa54bd7
DD
2381 shost_printk(KERN_WARNING, target->scsi_host,
2382 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2383 break;
2384 }
2385
2386 if (comp)
509c07bc 2387 complete(&ch->done);
aef9ec39 2388
aef9ec39
RD
2389 return 0;
2390}
2391
71444b97
JW
2392/**
2393 * srp_change_queue_depth - setting device queue depth
2394 * @sdev: scsi device struct
2395 * @qdepth: requested queue depth
71444b97
JW
2396 *
2397 * Returns queue depth.
2398 */
2399static int
db5ed4df 2400srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2401{
c40ecc12 2402 if (!sdev->tagged_supported)
1e6f2416 2403 qdepth = 1;
db5ed4df 2404 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2405}
2406
509c07bc
BVA
2407static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2408 unsigned int lun, u8 func)
aef9ec39 2409{
509c07bc 2410 struct srp_target_port *target = ch->target;
a95cadb9 2411 struct srp_rport *rport = target->rport;
19081f31 2412 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2413 struct srp_iu *iu;
2414 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2415
c014c8cd 2416 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2417 return -1;
2418
509c07bc 2419 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2420
a95cadb9 2421 /*
509c07bc 2422 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2423 * invoked while a task management function is being sent.
2424 */
2425 mutex_lock(&rport->mutex);
509c07bc
BVA
2426 spin_lock_irq(&ch->lock);
2427 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2428 spin_unlock_irq(&ch->lock);
76c75b25 2429
a95cadb9
BVA
2430 if (!iu) {
2431 mutex_unlock(&rport->mutex);
2432
76c75b25 2433 return -1;
a95cadb9 2434 }
aef9ec39 2435
19081f31
DD
2436 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2437 DMA_TO_DEVICE);
aef9ec39
RD
2438 tsk_mgmt = iu->buf;
2439 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2440
2441 tsk_mgmt->opcode = SRP_TSK_MGMT;
f8b6e31e
DD
2442 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2443 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2444 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2445 tsk_mgmt->task_tag = req_tag;
aef9ec39 2446
19081f31
DD
2447 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2448 DMA_TO_DEVICE);
509c07bc
BVA
2449 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2450 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2451 mutex_unlock(&rport->mutex);
2452
76c75b25
BVA
2453 return -1;
2454 }
a95cadb9 2455 mutex_unlock(&rport->mutex);
d945e1df 2456
509c07bc 2457 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2458 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2459 return -1;
aef9ec39 2460
d945e1df 2461 return 0;
d945e1df
RD
2462}
2463
aef9ec39
RD
2464static int srp_abort(struct scsi_cmnd *scmnd)
2465{
d945e1df 2466 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2467 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2468 u32 tag;
d92c0da7 2469 u16 ch_idx;
509c07bc 2470 struct srp_rdma_ch *ch;
086f44f5 2471 int ret;
d945e1df 2472
7aa54bd7 2473 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2474
d92c0da7 2475 if (!req)
99b6697a 2476 return SUCCESS;
77f2c1a4 2477 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2478 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2479 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2480 return SUCCESS;
2481 ch = &target->ch[ch_idx];
2482 if (!srp_claim_req(ch, req, NULL, scmnd))
2483 return SUCCESS;
2484 shost_printk(KERN_ERR, target->scsi_host,
2485 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2486 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2487 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2488 ret = SUCCESS;
ed9b2264 2489 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2490 ret = FAST_IO_FAIL;
086f44f5
BVA
2491 else
2492 ret = FAILED;
509c07bc 2493 srp_free_req(ch, req, scmnd, 0);
22032991 2494 scmnd->result = DID_ABORT << 16;
d8536670 2495 scmnd->scsi_done(scmnd);
d945e1df 2496
086f44f5 2497 return ret;
aef9ec39
RD
2498}
2499
2500static int srp_reset_device(struct scsi_cmnd *scmnd)
2501{
d945e1df 2502 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2503 struct srp_rdma_ch *ch;
536ae14e 2504 int i, j;
d945e1df 2505
7aa54bd7 2506 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2507
d92c0da7 2508 ch = &target->ch[0];
509c07bc 2509 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2510 SRP_TSK_LUN_RESET))
d945e1df 2511 return FAILED;
509c07bc 2512 if (ch->tsk_mgmt_status)
d945e1df
RD
2513 return FAILED;
2514
d92c0da7
BVA
2515 for (i = 0; i < target->ch_count; i++) {
2516 ch = &target->ch[i];
2517 for (j = 0; j < target->req_ring_size; ++j) {
2518 struct srp_request *req = &ch->req_ring[j];
509c07bc 2519
d92c0da7
BVA
2520 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2521 }
536ae14e 2522 }
d945e1df 2523
d945e1df 2524 return SUCCESS;
aef9ec39
RD
2525}
2526
2527static int srp_reset_host(struct scsi_cmnd *scmnd)
2528{
2529 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2530
7aa54bd7 2531 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2532
ed9b2264 2533 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2534}
2535
c9b03c1a
BVA
2536static int srp_slave_configure(struct scsi_device *sdev)
2537{
2538 struct Scsi_Host *shost = sdev->host;
2539 struct srp_target_port *target = host_to_target(shost);
2540 struct request_queue *q = sdev->request_queue;
2541 unsigned long timeout;
2542
2543 if (sdev->type == TYPE_DISK) {
2544 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2545 blk_queue_rq_timeout(q, timeout);
2546 }
2547
2548 return 0;
2549}
2550
ee959b00
TJ
2551static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2552 char *buf)
6ecb0c84 2553{
ee959b00 2554 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2555
45c37cad 2556 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2557}
2558
ee959b00
TJ
2559static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2560 char *buf)
6ecb0c84 2561{
ee959b00 2562 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2563
45c37cad 2564 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2565}
2566
ee959b00
TJ
2567static ssize_t show_service_id(struct device *dev,
2568 struct device_attribute *attr, char *buf)
6ecb0c84 2569{
ee959b00 2570 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2571
45c37cad 2572 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2573}
2574
ee959b00
TJ
2575static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2576 char *buf)
6ecb0c84 2577{
ee959b00 2578 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2579
747fe000 2580 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2581}
2582
848b3082
BVA
2583static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2584 char *buf)
2585{
2586 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2587
747fe000 2588 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2589}
2590
ee959b00
TJ
2591static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2592 char *buf)
6ecb0c84 2593{
ee959b00 2594 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2595 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2596
509c07bc 2597 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2598}
2599
ee959b00
TJ
2600static ssize_t show_orig_dgid(struct device *dev,
2601 struct device_attribute *attr, char *buf)
3633b3d0 2602{
ee959b00 2603 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2604
747fe000 2605 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2606}
2607
89de7486
BVA
2608static ssize_t show_req_lim(struct device *dev,
2609 struct device_attribute *attr, char *buf)
2610{
2611 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2612 struct srp_rdma_ch *ch;
2613 int i, req_lim = INT_MAX;
89de7486 2614
d92c0da7
BVA
2615 for (i = 0; i < target->ch_count; i++) {
2616 ch = &target->ch[i];
2617 req_lim = min(req_lim, ch->req_lim);
2618 }
2619 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2620}
2621
ee959b00
TJ
2622static ssize_t show_zero_req_lim(struct device *dev,
2623 struct device_attribute *attr, char *buf)
6bfa24fa 2624{
ee959b00 2625 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2626
6bfa24fa
RD
2627 return sprintf(buf, "%d\n", target->zero_req_lim);
2628}
2629
ee959b00
TJ
2630static ssize_t show_local_ib_port(struct device *dev,
2631 struct device_attribute *attr, char *buf)
ded7f1a1 2632{
ee959b00 2633 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2634
2635 return sprintf(buf, "%d\n", target->srp_host->port);
2636}
2637
ee959b00
TJ
2638static ssize_t show_local_ib_device(struct device *dev,
2639 struct device_attribute *attr, char *buf)
ded7f1a1 2640{
ee959b00 2641 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2642
05321937 2643 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2644}
2645
d92c0da7
BVA
2646static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2647 char *buf)
2648{
2649 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2650
2651 return sprintf(buf, "%d\n", target->ch_count);
2652}
2653
4b5e5f41
BVA
2654static ssize_t show_comp_vector(struct device *dev,
2655 struct device_attribute *attr, char *buf)
2656{
2657 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2658
2659 return sprintf(buf, "%d\n", target->comp_vector);
2660}
2661
7bb312e4
VP
2662static ssize_t show_tl_retry_count(struct device *dev,
2663 struct device_attribute *attr, char *buf)
2664{
2665 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2666
2667 return sprintf(buf, "%d\n", target->tl_retry_count);
2668}
2669
49248644
DD
2670static ssize_t show_cmd_sg_entries(struct device *dev,
2671 struct device_attribute *attr, char *buf)
2672{
2673 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2674
2675 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2676}
2677
c07d424d
DD
2678static ssize_t show_allow_ext_sg(struct device *dev,
2679 struct device_attribute *attr, char *buf)
2680{
2681 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2682
2683 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2684}
2685
ee959b00
TJ
2686static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2687static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2688static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2689static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2690static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2691static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2692static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2693static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2694static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2695static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2696static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2697static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2698static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2699static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2700static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2701static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2702
2703static struct device_attribute *srp_host_attrs[] = {
2704 &dev_attr_id_ext,
2705 &dev_attr_ioc_guid,
2706 &dev_attr_service_id,
2707 &dev_attr_pkey,
848b3082 2708 &dev_attr_sgid,
ee959b00
TJ
2709 &dev_attr_dgid,
2710 &dev_attr_orig_dgid,
89de7486 2711 &dev_attr_req_lim,
ee959b00
TJ
2712 &dev_attr_zero_req_lim,
2713 &dev_attr_local_ib_port,
2714 &dev_attr_local_ib_device,
d92c0da7 2715 &dev_attr_ch_count,
4b5e5f41 2716 &dev_attr_comp_vector,
7bb312e4 2717 &dev_attr_tl_retry_count,
49248644 2718 &dev_attr_cmd_sg_entries,
c07d424d 2719 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2720 NULL
2721};
2722
aef9ec39
RD
2723static struct scsi_host_template srp_template = {
2724 .module = THIS_MODULE,
b7f008fd
RD
2725 .name = "InfiniBand SRP initiator",
2726 .proc_name = DRV_NAME,
c9b03c1a 2727 .slave_configure = srp_slave_configure,
aef9ec39
RD
2728 .info = srp_target_info,
2729 .queuecommand = srp_queuecommand,
71444b97 2730 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2731 .eh_abort_handler = srp_abort,
2732 .eh_device_reset_handler = srp_reset_device,
2733 .eh_host_reset_handler = srp_reset_host,
2742c1da 2734 .skip_settle_delay = true,
49248644 2735 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2736 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2737 .this_id = -1,
4d73f95f 2738 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2739 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4
BVA
2740 .shost_attrs = srp_host_attrs,
2741 .use_blk_tags = 1,
c40ecc12 2742 .track_queue_depth = 1,
aef9ec39
RD
2743};
2744
34aa654e
BVA
2745static int srp_sdev_count(struct Scsi_Host *host)
2746{
2747 struct scsi_device *sdev;
2748 int c = 0;
2749
2750 shost_for_each_device(sdev, host)
2751 c++;
2752
2753 return c;
2754}
2755
aef9ec39
RD
2756static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2757{
3236822b
FT
2758 struct srp_rport_identifiers ids;
2759 struct srp_rport *rport;
2760
34aa654e 2761 target->state = SRP_TARGET_SCANNING;
aef9ec39 2762 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2763 be64_to_cpu(target->id_ext));
aef9ec39 2764
05321937 2765 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2766 return -ENODEV;
2767
3236822b
FT
2768 memcpy(ids.port_id, &target->id_ext, 8);
2769 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2770 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2771 rport = srp_rport_add(target->scsi_host, &ids);
2772 if (IS_ERR(rport)) {
2773 scsi_remove_host(target->scsi_host);
2774 return PTR_ERR(rport);
2775 }
2776
dc1bdbd9 2777 rport->lld_data = target;
9dd69a60 2778 target->rport = rport;
dc1bdbd9 2779
b3589fd4 2780 spin_lock(&host->target_lock);
aef9ec39 2781 list_add_tail(&target->list, &host->target_list);
b3589fd4 2782 spin_unlock(&host->target_lock);
aef9ec39 2783
aef9ec39 2784 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2785 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2786
c014c8cd
BVA
2787 if (srp_connected_ch(target) < target->ch_count ||
2788 target->qp_in_error) {
34aa654e
BVA
2789 shost_printk(KERN_INFO, target->scsi_host,
2790 PFX "SCSI scan failed - removing SCSI host\n");
2791 srp_queue_remove_work(target);
2792 goto out;
2793 }
2794
2795 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2796 dev_name(&target->scsi_host->shost_gendev),
2797 srp_sdev_count(target->scsi_host));
2798
2799 spin_lock_irq(&target->lock);
2800 if (target->state == SRP_TARGET_SCANNING)
2801 target->state = SRP_TARGET_LIVE;
2802 spin_unlock_irq(&target->lock);
2803
2804out:
aef9ec39
RD
2805 return 0;
2806}
2807
ee959b00 2808static void srp_release_dev(struct device *dev)
aef9ec39
RD
2809{
2810 struct srp_host *host =
ee959b00 2811 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2812
2813 complete(&host->released);
2814}
2815
2816static struct class srp_class = {
2817 .name = "infiniband_srp",
ee959b00 2818 .dev_release = srp_release_dev
aef9ec39
RD
2819};
2820
96fc248a
BVA
2821/**
2822 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2823 * @host: SRP host.
2824 * @target: SRP target port.
96fc248a
BVA
2825 */
2826static bool srp_conn_unique(struct srp_host *host,
2827 struct srp_target_port *target)
2828{
2829 struct srp_target_port *t;
2830 bool ret = false;
2831
2832 if (target->state == SRP_TARGET_REMOVED)
2833 goto out;
2834
2835 ret = true;
2836
2837 spin_lock(&host->target_lock);
2838 list_for_each_entry(t, &host->target_list, list) {
2839 if (t != target &&
2840 target->id_ext == t->id_ext &&
2841 target->ioc_guid == t->ioc_guid &&
2842 target->initiator_ext == t->initiator_ext) {
2843 ret = false;
2844 break;
2845 }
2846 }
2847 spin_unlock(&host->target_lock);
2848
2849out:
2850 return ret;
2851}
2852
aef9ec39
RD
2853/*
2854 * Target ports are added by writing
2855 *
2856 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2857 * pkey=<P_Key>,service_id=<service ID>
2858 *
2859 * to the add_target sysfs attribute.
2860 */
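/*
 * For example (all identifiers below are placeholders):
 *
 * echo "id_ext=200100e08b0a1d50,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * where srp-mlx4_0-1 is the host device that srp_add_port() registers for
 * port 1 of an HCA called mlx4_0.
 */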
2861enum {
2862 SRP_OPT_ERR = 0,
2863 SRP_OPT_ID_EXT = 1 << 0,
2864 SRP_OPT_IOC_GUID = 1 << 1,
2865 SRP_OPT_DGID = 1 << 2,
2866 SRP_OPT_PKEY = 1 << 3,
2867 SRP_OPT_SERVICE_ID = 1 << 4,
2868 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2869 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2870 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2871 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2872 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2873 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2874 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2875 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2876 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2877 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2878 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2879 SRP_OPT_IOC_GUID |
2880 SRP_OPT_DGID |
2881 SRP_OPT_PKEY |
2882 SRP_OPT_SERVICE_ID),
2883};
2884
a447c093 2885static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2886 { SRP_OPT_ID_EXT, "id_ext=%s" },
2887 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2888 { SRP_OPT_DGID, "dgid=%s" },
2889 { SRP_OPT_PKEY, "pkey=%x" },
2890 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2891 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2892 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2893 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2894 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2895 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2896 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2897 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2898 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2899 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2900 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2901 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2902};
2903
2904static int srp_parse_options(const char *buf, struct srp_target_port *target)
2905{
2906 char *options, *sep_opt;
2907 char *p;
2908 char dgid[3];
2909 substring_t args[MAX_OPT_ARGS];
2910 int opt_mask = 0;
2911 int token;
2912 int ret = -EINVAL;
2913 int i;
2914
2915 options = kstrdup(buf, GFP_KERNEL);
2916 if (!options)
2917 return -ENOMEM;
2918
2919 sep_opt = options;
7dcf9c19 2920 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2921 if (!*p)
2922 continue;
2923
2924 token = match_token(p, srp_opt_tokens, args);
2925 opt_mask |= token;
2926
2927 switch (token) {
2928 case SRP_OPT_ID_EXT:
2929 p = match_strdup(args);
a20f3a6d
IR
2930 if (!p) {
2931 ret = -ENOMEM;
2932 goto out;
2933 }
aef9ec39
RD
2934 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2935 kfree(p);
2936 break;
2937
2938 case SRP_OPT_IOC_GUID:
2939 p = match_strdup(args);
a20f3a6d
IR
2940 if (!p) {
2941 ret = -ENOMEM;
2942 goto out;
2943 }
aef9ec39
RD
2944 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2945 kfree(p);
2946 break;
2947
2948 case SRP_OPT_DGID:
2949 p = match_strdup(args);
a20f3a6d
IR
2950 if (!p) {
2951 ret = -ENOMEM;
2952 goto out;
2953 }
aef9ec39 2954 if (strlen(p) != 32) {
e0bda7d8 2955 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 2956 kfree(p);
aef9ec39
RD
2957 goto out;
2958 }
2959
2960 for (i = 0; i < 16; ++i) {
747fe000
BVA
2961 strlcpy(dgid, p + i * 2, sizeof(dgid));
2962 if (sscanf(dgid, "%hhx",
2963 &target->orig_dgid.raw[i]) < 1) {
2964 ret = -EINVAL;
2965 kfree(p);
2966 goto out;
2967 }
aef9ec39 2968 }
bf17c1c7 2969 kfree(p);
aef9ec39
RD
2970 break;
2971
2972 case SRP_OPT_PKEY:
2973 if (match_hex(args, &token)) {
e0bda7d8 2974 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
2975 goto out;
2976 }
747fe000 2977 target->pkey = cpu_to_be16(token);
aef9ec39
RD
2978 break;
2979
2980 case SRP_OPT_SERVICE_ID:
2981 p = match_strdup(args);
a20f3a6d
IR
2982 if (!p) {
2983 ret = -ENOMEM;
2984 goto out;
2985 }
aef9ec39
RD
2986 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2987 kfree(p);
2988 break;
2989
2990 case SRP_OPT_MAX_SECT:
2991 if (match_int(args, &token)) {
e0bda7d8 2992 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
2993 goto out;
2994 }
2995 target->scsi_host->max_sectors = token;
2996 break;
2997
4d73f95f
BVA
2998 case SRP_OPT_QUEUE_SIZE:
2999 if (match_int(args, &token) || token < 1) {
3000 pr_warn("bad queue_size parameter '%s'\n", p);
3001 goto out;
3002 }
3003 target->scsi_host->can_queue = token;
3004 target->queue_size = token + SRP_RSP_SQ_SIZE +
3005 SRP_TSK_MGMT_SQ_SIZE;
3006 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3007 target->scsi_host->cmd_per_lun = token;
3008 break;
3009
52fb2b50 3010 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3011 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3012 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3013 p);
52fb2b50
VP
3014 goto out;
3015 }
4d73f95f 3016 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3017 break;
3018
0c0450db
R
3019 case SRP_OPT_IO_CLASS:
3020 if (match_hex(args, &token)) {
e0bda7d8 3021 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3022 goto out;
3023 }
3024 if (token != SRP_REV10_IB_IO_CLASS &&
3025 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3026 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3027 token, SRP_REV10_IB_IO_CLASS,
3028 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3029 goto out;
3030 }
3031 target->io_class = token;
3032 break;
3033
01cb9bcb
IR
3034 case SRP_OPT_INITIATOR_EXT:
3035 p = match_strdup(args);
a20f3a6d
IR
3036 if (!p) {
3037 ret = -ENOMEM;
3038 goto out;
3039 }
01cb9bcb
IR
3040 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3041 kfree(p);
3042 break;
3043
49248644
DD
3044 case SRP_OPT_CMD_SG_ENTRIES:
3045 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3046 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3047 p);
49248644
DD
3048 goto out;
3049 }
3050 target->cmd_sg_cnt = token;
3051 break;
3052
c07d424d
DD
3053 case SRP_OPT_ALLOW_EXT_SG:
3054 if (match_int(args, &token)) {
e0bda7d8 3055 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3056 goto out;
3057 }
3058 target->allow_ext_sg = !!token;
3059 break;
3060
3061 case SRP_OPT_SG_TABLESIZE:
3062 if (match_int(args, &token) || token < 1 ||
3063 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3064 pr_warn("bad max sg_tablesize parameter '%s'\n",
3065 p);
c07d424d
DD
3066 goto out;
3067 }
3068 target->sg_tablesize = token;
3069 break;
3070
4b5e5f41
BVA
3071 case SRP_OPT_COMP_VECTOR:
3072 if (match_int(args, &token) || token < 0) {
3073 pr_warn("bad comp_vector parameter '%s'\n", p);
3074 goto out;
3075 }
3076 target->comp_vector = token;
3077 break;
3078
7bb312e4
VP
3079 case SRP_OPT_TL_RETRY_COUNT:
3080 if (match_int(args, &token) || token < 2 || token > 7) {
3081 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3082 p);
3083 goto out;
3084 }
3085 target->tl_retry_count = token;
3086 break;
3087
aef9ec39 3088 default:
e0bda7d8
BVA
3089 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3090 p);
aef9ec39
RD
3091 goto out;
3092 }
3093 }
3094
3095 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3096 ret = 0;
3097 else
3098 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3099 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3100 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3101 pr_warn("target creation request is missing parameter '%s'\n",
3102 srp_opt_tokens[i].pattern);
aef9ec39 3103
4d73f95f
BVA
3104 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3105 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3106 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3107 target->scsi_host->cmd_per_lun,
3108 target->scsi_host->can_queue);
3109
aef9ec39
RD
3110out:
3111 kfree(options);
3112 return ret;
3113}
3114
ee959b00
TJ
3115static ssize_t srp_create_target(struct device *dev,
3116 struct device_attribute *attr,
aef9ec39
RD
3117 const char *buf, size_t count)
3118{
3119 struct srp_host *host =
ee959b00 3120 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3121 struct Scsi_Host *target_host;
3122 struct srp_target_port *target;
509c07bc 3123 struct srp_rdma_ch *ch;
d1b4289e
BVA
3124 struct srp_device *srp_dev = host->srp_dev;
3125 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3126 int ret, node_idx, node, cpu, i;
3127 bool multich = false;
aef9ec39
RD
3128
3129 target_host = scsi_host_alloc(&srp_template,
3130 sizeof (struct srp_target_port));
3131 if (!target_host)
3132 return -ENOMEM;
3133
49248644 3134 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3135 target_host->max_channel = 0;
3136 target_host->max_id = 1;
3c8edf0e
AR
3137 target_host->max_lun = SRP_MAX_LUN;
3138 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3139
aef9ec39 3140 target = host_to_target(target_host);
aef9ec39 3141
49248644
DD
3142 target->io_class = SRP_REV16A_IB_IO_CLASS;
3143 target->scsi_host = target_host;
3144 target->srp_host = host;
3145 target->lkey = host->srp_dev->mr->lkey;
3146 target->rkey = host->srp_dev->mr->rkey;
3147 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3148 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3149 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3150 target->tl_retry_count = 7;
4d73f95f 3151 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3152
34aa654e
BVA
3153 /*
3154 * Prevent the SCSI host from being removed by srp_remove_target()
3155 * before this function returns.
3156 */
3157 scsi_host_get(target->scsi_host);
3158
2d7091bc
BVA
3159 mutex_lock(&host->add_target_mutex);
3160
aef9ec39
RD
3161 ret = srp_parse_options(buf, target);
3162 if (ret)
fb49c8bb 3163 goto out;
aef9ec39 3164
77f2c1a4
BVA
3165 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3166 if (ret)
fb49c8bb 3167 goto out;
77f2c1a4 3168
4d73f95f
BVA
3169 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3170
96fc248a
BVA
3171 if (!srp_conn_unique(target->srp_host, target)) {
3172 shost_printk(KERN_INFO, target->scsi_host,
3173 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3174 be64_to_cpu(target->id_ext),
3175 be64_to_cpu(target->ioc_guid),
3176 be64_to_cpu(target->initiator_ext));
3177 ret = -EEXIST;
fb49c8bb 3178 goto out;
96fc248a
BVA
3179 }
3180
5cfb1782 3181 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3182 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3183 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3184 target->sg_tablesize = target->cmd_sg_cnt;
3185 }
3186
3187 target_host->sg_tablesize = target->sg_tablesize;
3188 target->indirect_size = target->sg_tablesize *
3189 sizeof (struct srp_direct_buf);
49248644
DD
3190 target->max_iu_len = sizeof (struct srp_cmd) +
3191 sizeof (struct srp_indirect_buf) +
3192 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3193
c1120f89 3194 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3195 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3196 spin_lock_init(&target->lock);
747fe000 3197 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66 3198 if (ret)
fb49c8bb 3199 goto out;
aef9ec39 3200
d92c0da7
BVA
3201 ret = -ENOMEM;
3202 target->ch_count = max_t(unsigned, num_online_nodes(),
3203 min(ch_count ? :
3204 min(4 * num_online_nodes(),
3205 ibdev->num_comp_vectors),
3206 num_online_cpus()));
3207 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3208 GFP_KERNEL);
3209 if (!target->ch)
fb49c8bb 3210 goto out;
aef9ec39 3211
d92c0da7
BVA
3212 node_idx = 0;
3213 for_each_online_node(node) {
3214 const int ch_start = (node_idx * target->ch_count /
3215 num_online_nodes());
3216 const int ch_end = ((node_idx + 1) * target->ch_count /
3217 num_online_nodes());
3218 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3219 num_online_nodes() + target->comp_vector)
3220 % ibdev->num_comp_vectors;
3221 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3222 num_online_nodes() + target->comp_vector)
3223 % ibdev->num_comp_vectors;
3224 int cpu_idx = 0;
3225
3226 for_each_online_cpu(cpu) {
3227 if (cpu_to_node(cpu) != node)
3228 continue;
3229 if (ch_start + cpu_idx >= ch_end)
3230 continue;
3231 ch = &target->ch[ch_start + cpu_idx];
3232 ch->target = target;
3233 ch->comp_vector = cv_start == cv_end ? cv_start :
3234 cv_start + cpu_idx % (cv_end - cv_start);
3235 spin_lock_init(&ch->lock);
3236 INIT_LIST_HEAD(&ch->free_tx);
3237 ret = srp_new_cm_id(ch);
3238 if (ret)
3239 goto err_disconnect;
aef9ec39 3240
d92c0da7
BVA
3241 ret = srp_create_ch_ib(ch);
3242 if (ret)
3243 goto err_disconnect;
3244
3245 ret = srp_alloc_req_data(ch);
3246 if (ret)
3247 goto err_disconnect;
3248
3249 ret = srp_connect_ch(ch, multich);
3250 if (ret) {
3251 shost_printk(KERN_ERR, target->scsi_host,
3252 PFX "Connection %d/%d failed\n",
3253 ch_start + cpu_idx,
3254 target->ch_count);
3255 if (node_idx == 0 && cpu_idx == 0) {
3256 goto err_disconnect;
3257 } else {
3258 srp_free_ch_ib(target, ch);
3259 srp_free_req_data(target, ch);
3260 target->ch_count = ch - target->ch;
3261 break;
3262 }
3263 }
3264
3265 multich = true;
3266 cpu_idx++;
3267 }
3268 node_idx++;
aef9ec39
RD
3269 }
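	/*
	 * To illustrate the distribution above with hypothetical numbers:
	 * two online NUMA nodes, sixteen online CPUs, eight completion
	 * vectors and default options give ch_count == 8; node 0 gets
	 * channels 0-3 on completion vectors 0-3 and node 1 gets channels
	 * 4-7 on completion vectors 4-7, the intent being that each
	 * channel's completions are processed near the CPUs that submit
	 * on it.
	 */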
3270
d92c0da7
BVA
3271 target->scsi_host->nr_hw_queues = target->ch_count;
3272
aef9ec39
RD
3273 ret = srp_add_target(host, target);
3274 if (ret)
3275 goto err_disconnect;
3276
34aa654e
BVA
3277 if (target->state != SRP_TARGET_REMOVED) {
3278 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3279 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3280 be64_to_cpu(target->id_ext),
3281 be64_to_cpu(target->ioc_guid),
747fe000 3282 be16_to_cpu(target->pkey),
34aa654e 3283 be64_to_cpu(target->service_id),
747fe000 3284 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3285 }
e7ffde01 3286
2d7091bc
BVA
3287 ret = count;
3288
3289out:
3290 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3291
3292 scsi_host_put(target->scsi_host);
3293
2d7091bc 3294 return ret;
aef9ec39
RD
3295
3296err_disconnect:
3297 srp_disconnect_target(target);
3298
d92c0da7
BVA
3299 for (i = 0; i < target->ch_count; i++) {
3300 ch = &target->ch[i];
3301 srp_free_ch_ib(target, ch);
3302 srp_free_req_data(target, ch);
3303 }
aef9ec39 3304
d92c0da7 3305 kfree(target->ch);
2d7091bc 3306 goto out;
aef9ec39
RD
3307}
3308
ee959b00 3309static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3310
ee959b00
TJ
3311static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3312 char *buf)
aef9ec39 3313{
ee959b00 3314 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3315
05321937 3316 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3317}
3318
ee959b00 3319static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3320
ee959b00
TJ
3321static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3322 char *buf)
aef9ec39 3323{
ee959b00 3324 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3325
3326 return sprintf(buf, "%d\n", host->port);
3327}
3328
ee959b00 3329static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3330
f5358a17 3331static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3332{
3333 struct srp_host *host;
3334
3335 host = kzalloc(sizeof *host, GFP_KERNEL);
3336 if (!host)
3337 return NULL;
3338
3339 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3340 spin_lock_init(&host->target_lock);
aef9ec39 3341 init_completion(&host->released);
2d7091bc 3342 mutex_init(&host->add_target_mutex);
05321937 3343 host->srp_dev = device;
aef9ec39
RD
3344 host->port = port;
3345
ee959b00
TJ
3346 host->dev.class = &srp_class;
3347 host->dev.parent = device->dev->dma_device;
d927e38c 3348 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3349
ee959b00 3350 if (device_register(&host->dev))
f5358a17 3351 goto free_host;
ee959b00 3352 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3353 goto err_class;
ee959b00 3354 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3355 goto err_class;
ee959b00 3356 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3357 goto err_class;
3358
3359 return host;
3360
3361err_class:
ee959b00 3362 device_unregister(&host->dev);
aef9ec39 3363
f5358a17 3364free_host:
aef9ec39
RD
3365 kfree(host);
3366
3367 return NULL;
3368}
3369
3370static void srp_add_one(struct ib_device *device)
3371{
f5358a17
RD
3372 struct srp_device *srp_dev;
3373 struct ib_device_attr *dev_attr;
aef9ec39 3374 struct srp_host *host;
52ede08f
BVA
3375 int mr_page_shift, s, e, p;
3376 u64 max_pages_per_mr;
aef9ec39 3377
f5358a17
RD
3378 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3379 if (!dev_attr)
cf311cd4 3380 return;
aef9ec39 3381
f5358a17 3382 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3383 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3384 goto free_attr;
3385 }
3386
3387 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3388 if (!srp_dev)
3389 goto free_attr;
3390
d1b4289e
BVA
3391 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3392 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3393 srp_dev->has_fr = (dev_attr->device_cap_flags &
3394 IB_DEVICE_MEM_MGT_EXTENSIONS);
3395 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3396 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3397
3398 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3399 (!srp_dev->has_fmr || prefer_fr));
d1b4289e 3400
f5358a17
RD
3401 /*
3402 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3403 * minimum of 4096 bytes. We're unlikely to build large sglists
3404 * out of smaller entries.
f5358a17 3405 */
52ede08f
BVA
3406 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3407 srp_dev->mr_page_size = 1 << mr_page_shift;
3408 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3409 max_pages_per_mr = dev_attr->max_mr_size;
3410 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3411 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3412 max_pages_per_mr);
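	/*
	 * Worked example with made-up capabilities: page_size_cap =
	 * 0x000ff000 means the smallest supported page is 4 KB, so
	 * ffs() - 1 = 12 and mr_page_size = 4096; max_mr_size = 0x80000000
	 * then yields 0x80000 pages, which the min_t() above caps at
	 * SRP_MAX_PAGES_PER_MR.
	 */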
5cfb1782
BVA
3413 if (srp_dev->use_fast_reg) {
3414 srp_dev->max_pages_per_mr =
3415 min_t(u32, srp_dev->max_pages_per_mr,
3416 dev_attr->max_fast_reg_page_list_len);
3417 }
52ede08f
BVA
3418 srp_dev->mr_max_size = srp_dev->mr_page_size *
3419 srp_dev->max_pages_per_mr;
5cfb1782 3420 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3421 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3422 dev_attr->max_fast_reg_page_list_len,
52ede08f 3423 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3424
3425 INIT_LIST_HEAD(&srp_dev->dev_list);
3426
3427 srp_dev->dev = device;
3428 srp_dev->pd = ib_alloc_pd(device);
3429 if (IS_ERR(srp_dev->pd))
3430 goto free_dev;
3431
3432 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3433 IB_ACCESS_LOCAL_WRITE |
3434 IB_ACCESS_REMOTE_READ |
3435 IB_ACCESS_REMOTE_WRITE);
3436 if (IS_ERR(srp_dev->mr))
3437 goto err_pd;
3438
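	/*
	 * IB switches expose a single management port numbered 0, while HCAs
	 * and routers number their physical ports from 1 to phys_port_cnt.
	 */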
07ebafba 3439 if (device->node_type == RDMA_NODE_IB_SWITCH) {
aef9ec39
RD
3440 s = 0;
3441 e = 0;
3442 } else {
3443 s = 1;
3444 e = device->phys_port_cnt;
3445 }
3446
3447 for (p = s; p <= e; ++p) {
f5358a17 3448 host = srp_add_port(srp_dev, p);
aef9ec39 3449 if (host)
f5358a17 3450 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3451 }
3452
f5358a17
RD
3453 ib_set_client_data(device, &srp_client, srp_dev);
3454
3455 goto free_attr;
3456
3457err_pd:
3458 ib_dealloc_pd(srp_dev->pd);
3459
3460free_dev:
3461 kfree(srp_dev);
3462
3463free_attr:
3464 kfree(dev_attr);
aef9ec39
RD
3465}
3466
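/*
 * ib_client "remove" callback: undo everything srp_add_one() set up for this
 * device.  Unregister the per-port hosts, queue removal of all target ports,
 * wait for the removal work to finish, and release the MR and PD.
 */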
3467static void srp_remove_one(struct ib_device *device)
3468{
f5358a17 3469 struct srp_device *srp_dev;
aef9ec39 3470 struct srp_host *host, *tmp_host;
ef6c49d8 3471 struct srp_target_port *target;
aef9ec39 3472
f5358a17 3473 srp_dev = ib_get_client_data(device, &srp_client);
1fe0cb84
DB
3474 if (!srp_dev)
3475 return;
aef9ec39 3476
f5358a17 3477 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3478 device_unregister(&host->dev);
aef9ec39
RD
3479 /*
3480 * Wait for the sysfs entry to go away, so that no new
3481 * target ports can be created.
3482 */
3483 wait_for_completion(&host->released);
3484
3485 /*
ef6c49d8 3486 * Remove all target ports.
aef9ec39 3487 */
b3589fd4 3488 spin_lock(&host->target_lock);
ef6c49d8
BVA
3489 list_for_each_entry(target, &host->target_list, list)
3490 srp_queue_remove_work(target);
b3589fd4 3491 spin_unlock(&host->target_lock);
aef9ec39
RD
3492
3493 /*
bcc05910 3494 * Wait for tl_err and target port removal tasks.
aef9ec39 3495 */
ef6c49d8 3496 flush_workqueue(system_long_wq);
bcc05910 3497 flush_workqueue(srp_remove_wq);
aef9ec39 3498
aef9ec39
RD
3499 kfree(host);
3500 }
3501
f5358a17
RD
3502 ib_dereg_mr(srp_dev->mr);
3503 ib_dealloc_pd(srp_dev->pd);
3504
3505 kfree(srp_dev);
aef9ec39
RD
3506}
3507
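/*
 * Hooks handed to the SCSI SRP transport class via srp_attach_transport()
 * below; they let the transport core drive reconnects, the fast_io_fail and
 * dev_loss timeouts, I/O termination and rport deletion for this driver.
 */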
3236822b 3508static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3509 .has_rport_state = true,
3510 .reset_timer_if_blocked = true,
a95cadb9 3511 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3512 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3513 .dev_loss_tmo = &srp_dev_loss_tmo,
3514 .reconnect = srp_rport_reconnect,
dc1bdbd9 3515 .rport_delete = srp_rport_delete,
ed9b2264 3516 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3517};
3518
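/*
 * Module initialization: validate the scatter/gather module parameters,
 * create the srp_remove workqueue, attach the SRP transport template and
 * register the class, the SA client and the IB client.  The error path
 * unwinds these registrations in reverse order via the labels below.
 */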
aef9ec39
RD
3519static int __init srp_init_module(void)
3520{
3521 int ret;
3522
dcb4cb85 3523 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3524
49248644 3525 if (srp_sg_tablesize) {
e0bda7d8 3526 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3527 if (!cmd_sg_entries)
3528 cmd_sg_entries = srp_sg_tablesize;
3529 }
3530
3531 if (!cmd_sg_entries)
3532 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3533
3534 if (cmd_sg_entries > 255) {
e0bda7d8 3535 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3536 cmd_sg_entries = 255;
1e89a194
DD
3537 }
3538
c07d424d
DD
3539 if (!indirect_sg_entries)
3540 indirect_sg_entries = cmd_sg_entries;
3541 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3542 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3543 cmd_sg_entries);
c07d424d
DD
3544 indirect_sg_entries = cmd_sg_entries;
3545 }
3546
bcc05910 3547 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3548 if (!srp_remove_wq) {
3549 ret = -ENOMEM;
bcc05910
BVA
3550 goto out;
3551 }
3552
3553 ret = -ENOMEM;
3236822b
FT
3554 ib_srp_transport_template =
3555 srp_attach_transport(&ib_srp_transport_functions);
3556 if (!ib_srp_transport_template)
bcc05910 3557 goto destroy_wq;
3236822b 3558
aef9ec39
RD
3559 ret = class_register(&srp_class);
3560 if (ret) {
e0bda7d8 3561 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3562 goto release_tr;
aef9ec39
RD
3563 }
3564
c1a0b23b
MT
3565 ib_sa_register_client(&srp_sa_client);
3566
aef9ec39
RD
3567 ret = ib_register_client(&srp_client);
3568 if (ret) {
e0bda7d8 3569 pr_err("couldn't register IB client\n");
bcc05910 3570 goto unreg_sa;
aef9ec39
RD
3571 }
3572
bcc05910
BVA
3573out:
3574 return ret;
3575
3576unreg_sa:
3577 ib_sa_unregister_client(&srp_sa_client);
3578 class_unregister(&srp_class);
3579
3580release_tr:
3581 srp_release_transport(ib_srp_transport_template);
3582
3583destroy_wq:
3584 destroy_workqueue(srp_remove_wq);
3585 goto out;
aef9ec39
RD
3586}
3587
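/*
 * Module teardown: unregister the IB and SA clients and the class, then
 * release the transport template and destroy the workqueue, the reverse of
 * the registration order in srp_init_module().
 */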
3588static void __exit srp_cleanup_module(void)
3589{
3590 ib_unregister_client(&srp_client);
c1a0b23b 3591 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3592 class_unregister(&srp_class);
3236822b 3593 srp_release_transport(ib_srp_transport_template);
bcc05910 3594 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3595}
3596
3597module_init(srp_init_module);
3598module_exit(srp_cleanup_module);