/* drivers/infiniband/ulp/srp/ib_srp.c */

/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

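/*
 * Example usage (illustration added for clarity; not part of the original
 * source): the parameters above are normally given at module load time,
 * e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=255 register_always=1 ch_count=4
 *
 * while reconnect_delay, fast_io_fail_tmo and dev_loss_tmo may also be
 * changed at runtime through /sys/module/ib_srp/parameters/; such writes
 * are validated by srp_tmo_set() below.
 */
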
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

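/*
 * Note added for clarity: srp_tmo_set() rechecks all three timeouts
 * together, substituting the candidate value into the appropriate slot of
 * srp_tmo_valid(), so a new reconnect_delay, fast_io_fail_tmo or
 * dev_loss_tmo is only accepted if the three settings stay consistent with
 * each other; srp_tmo_get() reports negative (disabled) values as "off".
 */
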
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

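/*
 * Descriptive comment added for clarity: srp_alloc_fmr_pool() creates the
 * FMR pool used by the FMR-based memory registration path (stored in
 * ch->fmr_pool by srp_create_ch_ib()). The pool is sized after the SCSI
 * host's can_queue value, with a dirty watermark of a quarter of the pool,
 * and each FMR covers at most dev->max_pages_per_mr pages of
 * dev->mr_page_size bytes.
 */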
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

5cfb1782
BVA
330/**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335{
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
5cfb1782
BVA
343 if (d->mr)
344 ib_dereg_mr(d->mr);
345 }
346 kfree(pool);
347}
348
349/**
350 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
351 * @device: IB device to allocate fast registration descriptors for.
352 * @pd: Protection domain associated with the FR descriptors.
353 * @pool_size: Number of descriptors to allocate.
354 * @max_page_list_len: Maximum fast registration work request page list length.
355 */
356static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
357 struct ib_pd *pd, int pool_size,
358 int max_page_list_len)
359{
360 struct srp_fr_pool *pool;
361 struct srp_fr_desc *d;
362 struct ib_mr *mr;
5cfb1782
BVA
363 int i, ret = -EINVAL;
364
365 if (pool_size <= 0)
366 goto err;
367 ret = -ENOMEM;
368 pool = kzalloc(sizeof(struct srp_fr_pool) +
369 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
370 if (!pool)
371 goto err;
372 pool->size = pool_size;
373 pool->max_page_list_len = max_page_list_len;
374 spin_lock_init(&pool->lock);
375 INIT_LIST_HEAD(&pool->free_list);
376
377 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
563b67c5
SG
378 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
379 max_page_list_len);
5cfb1782
BVA
380 if (IS_ERR(mr)) {
381 ret = PTR_ERR(mr);
382 goto destroy_pool;
383 }
384 d->mr = mr;
5cfb1782
BVA
385 list_add_tail(&d->entry, &pool->free_list);
386 }
387
388out:
389 return pool;
390
391destroy_pool:
392 srp_destroy_fr_pool(pool);
393
394err:
395 pool = ERR_PTR(ret);
396 goto out;
397}
398
399/**
400 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
401 * @pool: Pool to obtain descriptor from.
402 */
403static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
404{
405 struct srp_fr_desc *d = NULL;
406 unsigned long flags;
407
408 spin_lock_irqsave(&pool->lock, flags);
409 if (!list_empty(&pool->free_list)) {
410 d = list_first_entry(&pool->free_list, typeof(*d), entry);
411 list_del(&d->entry);
412 }
413 spin_unlock_irqrestore(&pool->lock, flags);
414
415 return d;
416}
417
418/**
419 * srp_fr_pool_put() - put an FR descriptor back in the free list
420 * @pool: Pool the descriptor was allocated from.
421 * @desc: Pointer to an array of fast registration descriptor pointers.
422 * @n: Number of descriptors to put back.
423 *
424 * Note: The caller must already have queued an invalidation request for
425 * desc->mr->rkey before calling this function.
426 */
427static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
428 int n)
429{
430 unsigned long flags;
431 int i;
432
433 spin_lock_irqsave(&pool->lock, flags);
434 for (i = 0; i < n; i++)
435 list_add(&desc[i]->entry, &pool->free_list);
436 spin_unlock_irqrestore(&pool->lock, flags);
437}
438
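/*
 * Usage sketch (added for clarity; not part of the original source): a
 * descriptor is taken from the pool with srp_fr_pool_get(), its MR is
 * registered by posting an IB_WR_REG_MR work request (see
 * srp_map_finish_fr()), and once a local invalidate for desc->mr->rkey has
 * been queued, e.g. via srp_inv_rkey(), the descriptor is handed back with
 * srp_fr_pool_put().
 */
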
439static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
440{
441 struct srp_device *dev = target->srp_host->srp_dev;
442
443 return srp_create_fr_pool(dev->dev, dev->pd,
444 target->scsi_host->can_queue,
445 dev->max_pages_per_mr);
446}
447
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

509c07bc 482static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 483{
509c07bc 484 struct srp_target_port *target = ch->target;
62154b2e 485 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 486 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
487 struct ib_cq *recv_cq, *send_cq;
488 struct ib_qp *qp;
d1b4289e 489 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782
BVA
490 struct srp_fr_pool *fr_pool = NULL;
491 const int m = 1 + dev->use_fast_reg;
8e37210b 492 struct ib_cq_init_attr cq_attr = {};
aef9ec39
RD
493 int ret;
494
495 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
496 if (!init_attr)
497 return -ENOMEM;
498
7dad6b2e 499 /* + 1 for SRP_LAST_WR_ID */
8e37210b
MB
500 cq_attr.cqe = target->queue_size + 1;
501 cq_attr.comp_vector = ch->comp_vector;
509c07bc 502 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
8e37210b 503 &cq_attr);
73aa89ed
IR
504 if (IS_ERR(recv_cq)) {
505 ret = PTR_ERR(recv_cq);
da9d2f07 506 goto err;
aef9ec39
RD
507 }
508
8e37210b
MB
509 cq_attr.cqe = m * target->queue_size;
510 cq_attr.comp_vector = ch->comp_vector;
509c07bc 511 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
8e37210b 512 &cq_attr);
73aa89ed
IR
513 if (IS_ERR(send_cq)) {
514 ret = PTR_ERR(send_cq);
da9d2f07 515 goto err_recv_cq;
9c03dc9f
BVA
516 }
517
73aa89ed 518 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
519
520 init_attr->event_handler = srp_qp_event;
5cfb1782 521 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 522 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
523 init_attr->cap.max_recv_sge = 1;
524 init_attr->cap.max_send_sge = 1;
5cfb1782 525 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 526 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
527 init_attr->send_cq = send_cq;
528 init_attr->recv_cq = recv_cq;
aef9ec39 529
62154b2e 530 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
531 if (IS_ERR(qp)) {
532 ret = PTR_ERR(qp);
da9d2f07 533 goto err_send_cq;
aef9ec39
RD
534 }
535
73aa89ed 536 ret = srp_init_qp(target, qp);
da9d2f07
RD
537 if (ret)
538 goto err_qp;
aef9ec39 539
002f1567 540 if (dev->use_fast_reg) {
5cfb1782
BVA
541 fr_pool = srp_alloc_fr_pool(target);
542 if (IS_ERR(fr_pool)) {
543 ret = PTR_ERR(fr_pool);
544 shost_printk(KERN_WARNING, target->scsi_host, PFX
545 "FR pool allocation failed (%d)\n", ret);
546 goto err_qp;
547 }
002f1567 548 } else if (dev->use_fmr) {
d1b4289e
BVA
549 fmr_pool = srp_alloc_fmr_pool(target);
550 if (IS_ERR(fmr_pool)) {
551 ret = PTR_ERR(fmr_pool);
552 shost_printk(KERN_WARNING, target->scsi_host, PFX
553 "FMR pool allocation failed (%d)\n", ret);
554 goto err_qp;
555 }
d1b4289e
BVA
556 }
557
509c07bc 558 if (ch->qp)
7dad6b2e 559 srp_destroy_qp(ch);
509c07bc
BVA
560 if (ch->recv_cq)
561 ib_destroy_cq(ch->recv_cq);
562 if (ch->send_cq)
563 ib_destroy_cq(ch->send_cq);
73aa89ed 564
509c07bc
BVA
565 ch->qp = qp;
566 ch->recv_cq = recv_cq;
567 ch->send_cq = send_cq;
73aa89ed 568
7fbc67df
SG
569 if (dev->use_fast_reg) {
570 if (ch->fr_pool)
571 srp_destroy_fr_pool(ch->fr_pool);
572 ch->fr_pool = fr_pool;
573 } else if (dev->use_fmr) {
574 if (ch->fmr_pool)
575 ib_destroy_fmr_pool(ch->fmr_pool);
576 ch->fmr_pool = fmr_pool;
577 }
578
da9d2f07
RD
579 kfree(init_attr);
580 return 0;
581
582err_qp:
73aa89ed 583 ib_destroy_qp(qp);
da9d2f07
RD
584
585err_send_cq:
73aa89ed 586 ib_destroy_cq(send_cq);
da9d2f07
RD
587
588err_recv_cq:
73aa89ed 589 ib_destroy_cq(recv_cq);
da9d2f07
RD
590
591err:
aef9ec39
RD
592 kfree(init_attr);
593 return ret;
594}
595
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
aef9ec39 602{
5cfb1782 603 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
604 int i;
605
d92c0da7
BVA
606 if (!ch->target)
607 return;
608
509c07bc
BVA
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
394c595e
BVA
612 }
613
d92c0da7
BVA
614 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
615 if (!ch->qp)
616 return;
617
5cfb1782 618 if (dev->use_fast_reg) {
509c07bc
BVA
619 if (ch->fr_pool)
620 srp_destroy_fr_pool(ch->fr_pool);
002f1567 621 } else if (dev->use_fmr) {
509c07bc
BVA
622 if (ch->fmr_pool)
623 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 624 }
7dad6b2e 625 srp_destroy_qp(ch);
509c07bc
BVA
626 ib_destroy_cq(ch->send_cq);
627 ib_destroy_cq(ch->recv_cq);
aef9ec39 628
	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep performing recovery
	 * actions even after scsi_remove_host() has returned.
	 */
	ch->target = NULL;
636
509c07bc
BVA
637 ch->qp = NULL;
638 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 639
509c07bc 640 if (ch->rx_ring) {
4d73f95f 641 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
643 kfree(ch->rx_ring);
644 ch->rx_ring = NULL;
4d73f95f 645 }
509c07bc 646 if (ch->tx_ring) {
4d73f95f 647 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
649 kfree(ch->tx_ring);
650 ch->tx_ring = NULL;
4d73f95f 651 }
aef9ec39
RD
652}
653
654static void srp_path_rec_completion(int status,
655 struct ib_sa_path_rec *pathrec,
509c07bc 656 void *ch_ptr)
aef9ec39 657{
509c07bc
BVA
658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
aef9ec39 660
509c07bc 661 ch->status = status;
aef9ec39 662 if (status)
7aa54bd7
DD
663 shost_printk(KERN_ERR, target->scsi_host,
664 PFX "Got failed path rec status %d\n", status);
aef9ec39 665 else
509c07bc
BVA
666 ch->path = *pathrec;
667 complete(&ch->done);
aef9ec39
RD
668}
669
509c07bc 670static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 671{
509c07bc 672 struct srp_target_port *target = ch->target;
a702adce
BVA
673 int ret;
674
509c07bc
BVA
675 ch->path.numb_path = 1;
676
677 init_completion(&ch->done);
678
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
680 target->srp_host->srp_dev->dev,
681 target->srp_host->port,
682 &ch->path,
683 IB_SA_PATH_REC_SERVICE_ID |
684 IB_SA_PATH_REC_DGID |
685 IB_SA_PATH_REC_SGID |
686 IB_SA_PATH_REC_NUMB_PATH |
687 IB_SA_PATH_REC_PKEY,
688 SRP_PATH_REC_TIMEOUT_MS,
689 GFP_KERNEL,
690 srp_path_rec_completion,
691 ch, &ch->path_query);
692 if (ch->path_query_id < 0)
693 return ch->path_query_id;
694
695 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
696 if (ret < 0)
697 return ret;
aef9ec39 698
509c07bc 699 if (ch->status < 0)
7aa54bd7
DD
700 shost_printk(KERN_WARNING, target->scsi_host,
701 PFX "Path record query failed\n");
aef9ec39 702
509c07bc 703 return ch->status;
aef9ec39
RD
704}
705
d92c0da7 706static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 707{
509c07bc 708 struct srp_target_port *target = ch->target;
aef9ec39
RD
709 struct {
710 struct ib_cm_req_param param;
711 struct srp_login_req priv;
712 } *req = NULL;
713 int status;
714
715 req = kzalloc(sizeof *req, GFP_KERNEL);
716 if (!req)
717 return -ENOMEM;
718
509c07bc 719 req->param.primary_path = &ch->path;
aef9ec39
RD
720 req->param.alternate_path = NULL;
721 req->param.service_id = target->service_id;
509c07bc
BVA
722 req->param.qp_num = ch->qp->qp_num;
723 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
724 req->param.private_data = &req->priv;
725 req->param.private_data_len = sizeof req->priv;
726 req->param.flow_control = 1;
727
728 get_random_bytes(&req->param.starting_psn, 4);
729 req->param.starting_psn &= 0xffffff;
730
731 /*
732 * Pick some arbitrary defaults here; we could make these
733 * module parameters if anyone cared about setting them.
734 */
735 req->param.responder_resources = 4;
736 req->param.remote_cm_response_timeout = 20;
737 req->param.local_cm_response_timeout = 20;
7bb312e4 738 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
739 req->param.rnr_retry_count = 7;
740 req->param.max_cm_retries = 15;
741
742 req->priv.opcode = SRP_LOGIN_REQ;
743 req->priv.tag = 0;
49248644 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
746 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
759 memcpy(req->priv.initiator_port_id,
747fe000 760 &target->sgid.global.interface_id, 8);
0c0450db 761 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 762 &target->initiator_ext, 8);
0c0450db
R
763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
764 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
765 } else {
766 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
767 &target->initiator_ext, 8);
768 memcpy(req->priv.initiator_port_id + 8,
747fe000 769 &target->sgid.global.interface_id, 8);
0c0450db
R
770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
772 }
773
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
5d7cbfd6 779 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
780 shost_printk(KERN_DEBUG, target->scsi_host,
781 PFX "Topspin/Cisco initiator port ID workaround "
782 "activated for target GUID %016llx\n",
45c37cad 783 be64_to_cpu(target->ioc_guid));
aef9ec39 784 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 785 memcpy(req->priv.initiator_port_id + 8,
05321937 786 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 787 }
aef9ec39 788
509c07bc 789 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
790
791 kfree(req);
792
793 return status;
794}
795
ef6c49d8
BVA
796static bool srp_queue_remove_work(struct srp_target_port *target)
797{
798 bool changed = false;
799
800 spin_lock_irq(&target->lock);
801 if (target->state != SRP_TARGET_REMOVED) {
802 target->state = SRP_TARGET_REMOVED;
803 changed = true;
804 }
805 spin_unlock_irq(&target->lock);
806
807 if (changed)
bcc05910 808 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
809
810 return changed;
811}
812
aef9ec39
RD
813static void srp_disconnect_target(struct srp_target_port *target)
814{
d92c0da7
BVA
815 struct srp_rdma_ch *ch;
816 int i;
509c07bc 817
c014c8cd 818 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 819
c014c8cd
BVA
820 for (i = 0; i < target->ch_count; i++) {
821 ch = &target->ch[i];
822 ch->connected = false;
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
824 shost_printk(KERN_DEBUG, target->scsi_host,
825 PFX "Sending CM DREQ failed\n");
294c875a 826 }
e6581056 827 }
aef9ec39
RD
828}
829
509c07bc
BVA
830static void srp_free_req_data(struct srp_target_port *target,
831 struct srp_rdma_ch *ch)
8f26c9ff 832{
5cfb1782
BVA
833 struct srp_device *dev = target->srp_host->srp_dev;
834 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
835 struct srp_request *req;
836 int i;
837
47513cf4 838 if (!ch->req_ring)
4d73f95f
BVA
839 return;
840
841 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 842 req = &ch->req_ring[i];
5cfb1782
BVA
843 if (dev->use_fast_reg)
844 kfree(req->fr_list);
845 else
846 kfree(req->fmr_list);
8f26c9ff 847 kfree(req->map_page);
c07d424d
DD
848 if (req->indirect_dma_addr) {
849 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
850 target->indirect_size,
851 DMA_TO_DEVICE);
852 }
853 kfree(req->indirect_desc);
8f26c9ff 854 }
4d73f95f 855
509c07bc
BVA
856 kfree(ch->req_ring);
857 ch->req_ring = NULL;
8f26c9ff
DD
858}
859
509c07bc 860static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 861{
509c07bc 862 struct srp_target_port *target = ch->target;
b81d00bd
BVA
863 struct srp_device *srp_dev = target->srp_host->srp_dev;
864 struct ib_device *ibdev = srp_dev->dev;
865 struct srp_request *req;
5cfb1782 866 void *mr_list;
b81d00bd
BVA
867 dma_addr_t dma_addr;
868 int i, ret = -ENOMEM;
869
509c07bc
BVA
870 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
871 GFP_KERNEL);
872 if (!ch->req_ring)
4d73f95f
BVA
873 goto out;
874
875 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 876 req = &ch->req_ring[i];
5cfb1782
BVA
877 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
878 GFP_KERNEL);
879 if (!mr_list)
880 goto out;
881 if (srp_dev->use_fast_reg)
882 req->fr_list = mr_list;
883 else
884 req->fmr_list = mr_list;
52ede08f 885 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
d1b4289e 886 sizeof(void *), GFP_KERNEL);
5cfb1782
BVA
887 if (!req->map_page)
888 goto out;
b81d00bd 889 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 890 if (!req->indirect_desc)
b81d00bd
BVA
891 goto out;
892
893 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
894 target->indirect_size,
895 DMA_TO_DEVICE);
896 if (ib_dma_mapping_error(ibdev, dma_addr))
897 goto out;
898
899 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
900 }
901 ret = 0;
902
903out:
904 return ret;
905}
906
683b159a
BVA
907/**
908 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
909 * @shost: SCSI host whose attributes to remove from sysfs.
910 *
911 * Note: Any attributes defined in the host template and that did not exist
912 * before invocation of this function will be ignored.
913 */
914static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
915{
916 struct device_attribute **attr;
917
918 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
919 device_remove_file(&shost->shost_dev, *attr);
920}
921
ee12d6a8
BVA
922static void srp_remove_target(struct srp_target_port *target)
923{
d92c0da7
BVA
924 struct srp_rdma_ch *ch;
925 int i;
509c07bc 926
ef6c49d8
BVA
927 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
928
ee12d6a8 929 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 930 srp_rport_get(target->rport);
ee12d6a8
BVA
931 srp_remove_host(target->scsi_host);
932 scsi_remove_host(target->scsi_host);
93079162 933 srp_stop_rport_timers(target->rport);
ef6c49d8 934 srp_disconnect_target(target);
d92c0da7
BVA
935 for (i = 0; i < target->ch_count; i++) {
936 ch = &target->ch[i];
937 srp_free_ch_ib(target, ch);
938 }
c1120f89 939 cancel_work_sync(&target->tl_err_work);
9dd69a60 940 srp_rport_put(target->rport);
d92c0da7
BVA
941 for (i = 0; i < target->ch_count; i++) {
942 ch = &target->ch[i];
943 srp_free_req_data(target, ch);
944 }
945 kfree(target->ch);
946 target->ch = NULL;
65d7dd2f
VP
947
948 spin_lock(&target->srp_host->target_lock);
949 list_del(&target->list);
950 spin_unlock(&target->srp_host->target_lock);
951
ee12d6a8
BVA
952 scsi_host_put(target->scsi_host);
953}
954
c4028958 955static void srp_remove_work(struct work_struct *work)
aef9ec39 956{
c4028958 957 struct srp_target_port *target =
ef6c49d8 958 container_of(work, struct srp_target_port, remove_work);
aef9ec39 959
ef6c49d8 960 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 961
96fc248a 962 srp_remove_target(target);
aef9ec39
RD
963}
964
dc1bdbd9
BVA
965static void srp_rport_delete(struct srp_rport *rport)
966{
967 struct srp_target_port *target = rport->lld_data;
968
969 srp_queue_remove_work(target);
970}
971
c014c8cd
BVA
972/**
973 * srp_connected_ch() - number of connected channels
974 * @target: SRP target port.
975 */
976static int srp_connected_ch(struct srp_target_port *target)
977{
978 int i, c = 0;
979
980 for (i = 0; i < target->ch_count; i++)
981 c += target->ch[i].connected;
982
983 return c;
984}
985
d92c0da7 986static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 987{
509c07bc 988 struct srp_target_port *target = ch->target;
aef9ec39
RD
989 int ret;
990
c014c8cd 991 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 992
509c07bc 993 ret = srp_lookup_path(ch);
aef9ec39
RD
994 if (ret)
995 return ret;
996
997 while (1) {
509c07bc 998 init_completion(&ch->done);
d92c0da7 999 ret = srp_send_req(ch, multich);
aef9ec39
RD
1000 if (ret)
1001 return ret;
509c07bc 1002 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
1003 if (ret < 0)
1004 return ret;
aef9ec39
RD
1005
1006 /*
1007 * The CM event handling code will set status to
1008 * SRP_PORT_REDIRECT if we get a port redirect REJ
1009 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1010 * redirect REJ back.
1011 */
509c07bc 1012 switch (ch->status) {
aef9ec39 1013 case 0:
c014c8cd 1014 ch->connected = true;
aef9ec39
RD
1015 return 0;
1016
1017 case SRP_PORT_REDIRECT:
509c07bc 1018 ret = srp_lookup_path(ch);
aef9ec39
RD
1019 if (ret)
1020 return ret;
1021 break;
1022
1023 case SRP_DLID_REDIRECT:
1024 break;
1025
9fe4bcf4 1026 case SRP_STALE_CONN:
9fe4bcf4 1027 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1028 "giving up on stale connection\n");
509c07bc
BVA
1029 ch->status = -ECONNRESET;
1030 return ch->status;
9fe4bcf4 1031
aef9ec39 1032 default:
509c07bc 1033 return ch->status;
aef9ec39
RD
1034 }
1035 }
1036}
1037
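/*
 * Descriptive comment added for clarity: srp_inv_rkey() posts an
 * IB_WR_LOCAL_INV work request that invalidates @rkey; srp_unmap_data()
 * uses it to invalidate fast-registration rkeys before the corresponding
 * descriptors are returned to the pool with srp_fr_pool_put().
 */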
509c07bc 1038static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
1039{
1040 struct ib_send_wr *bad_wr;
1041 struct ib_send_wr wr = {
1042 .opcode = IB_WR_LOCAL_INV,
1043 .wr_id = LOCAL_INV_WR_ID_MASK,
1044 .next = NULL,
1045 .num_sge = 0,
1046 .send_flags = 0,
1047 .ex.invalidate_rkey = rkey,
1048 };
1049
509c07bc 1050 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1051}
1052
d945e1df 1053static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1054 struct srp_rdma_ch *ch,
d945e1df
RD
1055 struct srp_request *req)
1056{
509c07bc 1057 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1058 struct srp_device *dev = target->srp_host->srp_dev;
1059 struct ib_device *ibdev = dev->dev;
1060 int i, res;
8f26c9ff 1061
bb350d1d 1062 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1063 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1064 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1065 return;
1066
5cfb1782
BVA
1067 if (dev->use_fast_reg) {
1068 struct srp_fr_desc **pfr;
1069
1070 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1071 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1072 if (res < 0) {
1073 shost_printk(KERN_ERR, target->scsi_host, PFX
1074 "Queueing INV WR for rkey %#x failed (%d)\n",
1075 (*pfr)->mr->rkey, res);
1076 queue_work(system_long_wq,
1077 &target->tl_err_work);
1078 }
1079 }
1080 if (req->nmdesc)
509c07bc 1081 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782 1082 req->nmdesc);
002f1567 1083 } else if (dev->use_fmr) {
5cfb1782
BVA
1084 struct ib_pool_fmr **pfmr;
1085
1086 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1087 ib_fmr_pool_unmap(*pfmr);
1088 }
f5358a17 1089
8f26c9ff
DD
1090 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1091 scmnd->sc_data_direction);
d945e1df
RD
1092}
1093
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
509c07bc 1105static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1106 struct srp_request *req,
b3fe628d 1107 struct scsi_device *sdev,
22032991
BVA
1108 struct scsi_cmnd *scmnd)
1109{
1110 unsigned long flags;
1111
509c07bc 1112 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1113 if (req->scmnd &&
1114 (!sdev || req->scmnd->device == sdev) &&
1115 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1116 scmnd = req->scmnd;
1117 req->scmnd = NULL;
22032991
BVA
1118 } else {
1119 scmnd = NULL;
1120 }
509c07bc 1121 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1122
1123 return scmnd;
1124}
1125
1126/**
1127 * srp_free_req() - Unmap data and add request to the free request list.
509c07bc 1128 * @ch: SRP RDMA channel.
af24663b
BVA
1129 * @req: Request to be freed.
1130 * @scmnd: SCSI command associated with @req.
1131 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1132 */
509c07bc
BVA
1133static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1134 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1135{
94a9174c
BVA
1136 unsigned long flags;
1137
509c07bc 1138 srp_unmap_data(scmnd, ch, req);
22032991 1139
509c07bc
BVA
1140 spin_lock_irqsave(&ch->lock, flags);
1141 ch->req_lim += req_lim_delta;
509c07bc 1142 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1143}
1144
509c07bc
BVA
1145static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1146 struct scsi_device *sdev, int result)
526b4caa 1147{
509c07bc 1148 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1149
1150 if (scmnd) {
509c07bc 1151 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1152 scmnd->result = result;
22032991 1153 scmnd->scsi_done(scmnd);
22032991 1154 }
526b4caa
IR
1155}
1156
ed9b2264 1157static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1158{
ed9b2264 1159 struct srp_target_port *target = rport->lld_data;
d92c0da7 1160 struct srp_rdma_ch *ch;
b3fe628d
BVA
1161 struct Scsi_Host *shost = target->scsi_host;
1162 struct scsi_device *sdev;
d92c0da7 1163 int i, j;
ed9b2264 1164
b3fe628d
BVA
1165 /*
1166 * Invoking srp_terminate_io() while srp_queuecommand() is running
1167 * is not safe. Hence the warning statement below.
1168 */
1169 shost_for_each_device(sdev, shost)
1170 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1171
d92c0da7
BVA
1172 for (i = 0; i < target->ch_count; i++) {
1173 ch = &target->ch[i];
509c07bc 1174
d92c0da7
BVA
1175 for (j = 0; j < target->req_ring_size; ++j) {
1176 struct srp_request *req = &ch->req_ring[j];
1177
1178 srp_finish_req(ch, req, NULL,
1179 DID_TRANSPORT_FAILFAST << 16);
1180 }
ed9b2264
BVA
1181 }
1182}
aef9ec39 1183
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve that is to avoid calling this function
 * directly and to call srp_reconnect_rport() instead, since the latter
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
1193static int srp_rport_reconnect(struct srp_rport *rport)
1194{
1195 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1196 struct srp_rdma_ch *ch;
1197 int i, j, ret = 0;
1198 bool multich = false;
09be70a2 1199
aef9ec39 1200 srp_disconnect_target(target);
34aa654e
BVA
1201
1202 if (target->state == SRP_TARGET_SCANNING)
1203 return -ENODEV;
1204
aef9ec39 1205 /*
c7c4e7ff
BVA
1206 * Now get a new local CM ID so that we avoid confusing the target in
1207 * case things are really fouled up. Doing so also ensures that all CM
1208 * callbacks will have finished before a new QP is allocated.
aef9ec39 1209 */
d92c0da7
BVA
1210 for (i = 0; i < target->ch_count; i++) {
1211 ch = &target->ch[i];
d92c0da7 1212 ret += srp_new_cm_id(ch);
536ae14e 1213 }
d92c0da7
BVA
1214 for (i = 0; i < target->ch_count; i++) {
1215 ch = &target->ch[i];
d92c0da7
BVA
1216 for (j = 0; j < target->req_ring_size; ++j) {
1217 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1218
d92c0da7
BVA
1219 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1220 }
1221 }
1222 for (i = 0; i < target->ch_count; i++) {
1223 ch = &target->ch[i];
d92c0da7
BVA
1224 /*
1225 * Whether or not creating a new CM ID succeeded, create a new
1226 * QP. This guarantees that all completion callback function
1227 * invocations have finished before request resetting starts.
1228 */
1229 ret += srp_create_ch_ib(ch);
aef9ec39 1230
d92c0da7
BVA
1231 INIT_LIST_HEAD(&ch->free_tx);
1232 for (j = 0; j < target->queue_size; ++j)
1233 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1234 }
8de9fe3a
BVA
1235
1236 target->qp_in_error = false;
1237
d92c0da7
BVA
1238 for (i = 0; i < target->ch_count; i++) {
1239 ch = &target->ch[i];
bbac5ccf 1240 if (ret)
d92c0da7 1241 break;
d92c0da7
BVA
1242 ret = srp_connect_ch(ch, multich);
1243 multich = true;
1244 }
09be70a2 1245
ed9b2264
BVA
1246 if (ret == 0)
1247 shost_printk(KERN_INFO, target->scsi_host,
1248 PFX "reconnect succeeded\n");
aef9ec39
RD
1249
1250 return ret;
1251}
1252
8f26c9ff
DD
1253static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1254 unsigned int dma_len, u32 rkey)
f5358a17 1255{
8f26c9ff 1256 struct srp_direct_buf *desc = state->desc;
f5358a17 1257
3ae95da8
BVA
1258 WARN_ON_ONCE(!dma_len);
1259
8f26c9ff
DD
1260 desc->va = cpu_to_be64(dma_addr);
1261 desc->key = cpu_to_be32(rkey);
1262 desc->len = cpu_to_be32(dma_len);
f5358a17 1263
8f26c9ff
DD
1264 state->total_len += dma_len;
1265 state->desc++;
1266 state->ndesc++;
1267}
559ce8f1 1268
8f26c9ff 1269static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1270 struct srp_rdma_ch *ch)
8f26c9ff 1271{
186fbc66
BVA
1272 struct srp_target_port *target = ch->target;
1273 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1274 struct ib_pool_fmr *fmr;
1275 u64 io_addr = 0;
85507bcc 1276
f731ed62
BVA
1277 if (state->fmr.next >= state->fmr.end)
1278 return -ENOMEM;
1279
26630e8a
SG
1280 WARN_ON_ONCE(!dev->use_fmr);
1281
1282 if (state->npages == 0)
1283 return 0;
1284
1285 if (state->npages == 1 && target->global_mr) {
1286 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1287 target->global_mr->rkey);
1288 goto reset_state;
1289 }
1290
509c07bc 1291 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1292 state->npages, io_addr);
1293 if (IS_ERR(fmr))
1294 return PTR_ERR(fmr);
f5358a17 1295
f731ed62 1296 *state->fmr.next++ = fmr;
52ede08f 1297 state->nmdesc++;
f5358a17 1298
186fbc66
BVA
1299 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1300 state->dma_len, fmr->fmr->rkey);
539dde6f 1301
26630e8a
SG
1302reset_state:
1303 state->npages = 0;
1304 state->dma_len = 0;
1305
8f26c9ff
DD
1306 return 0;
1307}
1308
5cfb1782 1309static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1310 struct srp_rdma_ch *ch)
5cfb1782 1311{
509c07bc 1312 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1313 struct srp_device *dev = target->srp_host->srp_dev;
1314 struct ib_send_wr *bad_wr;
f7f7aab1 1315 struct ib_reg_wr wr;
5cfb1782
BVA
1316 struct srp_fr_desc *desc;
1317 u32 rkey;
f7f7aab1 1318 int n, err;
5cfb1782 1319
f731ed62
BVA
1320 if (state->fr.next >= state->fr.end)
1321 return -ENOMEM;
1322
26630e8a
SG
1323 WARN_ON_ONCE(!dev->use_fast_reg);
1324
f7f7aab1 1325 if (state->sg_nents == 0)
26630e8a
SG
1326 return 0;
1327
f7f7aab1
SG
1328 if (state->sg_nents == 1 && target->global_mr) {
1329 srp_map_desc(state, sg_dma_address(state->sg),
1330 sg_dma_len(state->sg),
26630e8a 1331 target->global_mr->rkey);
f7f7aab1 1332 return 1;
26630e8a
SG
1333 }
1334
509c07bc 1335 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1336 if (!desc)
1337 return -ENOMEM;
1338
1339 rkey = ib_inc_rkey(desc->mr->rkey);
1340 ib_update_fast_reg_key(desc->mr, rkey);
1341
f7f7aab1
SG
1342 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
1343 dev->mr_page_size);
1344 if (unlikely(n < 0))
1345 return n;
5cfb1782 1346
f7f7aab1
SG
1347 wr.wr.next = NULL;
1348 wr.wr.opcode = IB_WR_REG_MR;
e622f2f4 1349 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
f7f7aab1
SG
1350 wr.wr.num_sge = 0;
1351 wr.wr.send_flags = 0;
1352 wr.mr = desc->mr;
1353 wr.key = desc->mr->rkey;
1354 wr.access = (IB_ACCESS_LOCAL_WRITE |
1355 IB_ACCESS_REMOTE_READ |
1356 IB_ACCESS_REMOTE_WRITE);
5cfb1782 1357
f731ed62 1358 *state->fr.next++ = desc;
5cfb1782
BVA
1359 state->nmdesc++;
1360
f7f7aab1
SG
1361 srp_map_desc(state, desc->mr->iova,
1362 desc->mr->length, desc->mr->rkey);
5cfb1782 1363
26630e8a 1364 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
f7f7aab1 1365 if (unlikely(err))
26630e8a
SG
1366 return err;
1367
f7f7aab1 1368 return n;
5cfb1782
BVA
1369}
1370
8f26c9ff 1371static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1372 struct srp_rdma_ch *ch,
3ae95da8 1373 struct scatterlist *sg, int sg_index)
8f26c9ff 1374{
509c07bc 1375 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1376 struct srp_device *dev = target->srp_host->srp_dev;
1377 struct ib_device *ibdev = dev->dev;
1378 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1379 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3ae95da8 1380 unsigned int len = 0;
8f26c9ff
DD
1381 int ret;
1382
3ae95da8 1383 WARN_ON_ONCE(!dma_len);
f5358a17 1384
8f26c9ff 1385 while (dma_len) {
5cfb1782
BVA
1386 unsigned offset = dma_addr & ~dev->mr_page_mask;
1387 if (state->npages == dev->max_pages_per_mr || offset != 0) {
f7f7aab1 1388 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1389 if (ret)
1390 return ret;
8f26c9ff
DD
1391 }
1392
5cfb1782 1393 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1394
8f26c9ff
DD
1395 if (!state->npages)
1396 state->base_dma_addr = dma_addr;
5cfb1782 1397 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1398 state->dma_len += len;
8f26c9ff
DD
1399 dma_addr += len;
1400 dma_len -= len;
1401 }
1402
	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
1408 ret = 0;
0e0d3a48 1409 if (len != dev->mr_page_size)
f7f7aab1 1410 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1411 return ret;
1412}
1413
26630e8a
SG
1414static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1415 struct srp_request *req, struct scatterlist *scat,
1416 int count)
76bc1e1d 1417{
76bc1e1d 1418 struct scatterlist *sg;
0e0d3a48 1419 int i, ret;
76bc1e1d 1420
26630e8a
SG
1421 state->desc = req->indirect_desc;
1422 state->pages = req->map_page;
1423 state->fmr.next = req->fmr_list;
1424 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1425
1426 for_each_sg(scat, sg, count, i) {
1427 ret = srp_map_sg_entry(state, ch, sg, i);
1428 if (ret)
1429 return ret;
5cfb1782 1430 }
76bc1e1d 1431
f7f7aab1 1432 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1433 if (ret)
1434 return ret;
1435
1436 req->nmdesc = state->nmdesc;
1437
1438 return 0;
1439}
1440
1441static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1442 struct srp_request *req, struct scatterlist *scat,
1443 int count)
1444{
26630e8a 1445 state->desc = req->indirect_desc;
f7f7aab1
SG
1446 state->fr.next = req->fr_list;
1447 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1448 state->sg = scat;
1449 state->sg_nents = scsi_sg_count(req->scmnd);
26630e8a 1450
f7f7aab1
SG
1451 while (state->sg_nents) {
1452 int i, n;
26630e8a 1453
f7f7aab1
SG
1454 n = srp_map_finish_fr(state, ch);
1455 if (unlikely(n < 0))
1456 return n;
1457
1458 state->sg_nents -= n;
1459 for (i = 0; i < n; i++)
1460 state->sg = sg_next(state->sg);
1461 }
26630e8a
SG
1462
1463 req->nmdesc = state->nmdesc;
1464
1465 return 0;
1466}
1467
1468static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1469 struct srp_request *req, struct scatterlist *scat,
1470 int count)
1471{
1472 struct srp_target_port *target = ch->target;
1473 struct srp_device *dev = target->srp_host->srp_dev;
1474 struct scatterlist *sg;
1475 int i;
1476
1477 state->desc = req->indirect_desc;
1478 for_each_sg(scat, sg, count, i) {
1479 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1480 ib_sg_dma_len(dev->dev, sg),
1481 target->global_mr->rkey);
0e0d3a48 1482 }
76bc1e1d 1483
52ede08f 1484 req->nmdesc = state->nmdesc;
5cfb1782 1485
26630e8a 1486 return 0;
76bc1e1d
BVA
1487}
1488
330179f2
BVA
1489/*
1490 * Register the indirect data buffer descriptor with the HCA.
1491 *
1492 * Note: since the indirect data buffer descriptor has been allocated with
1493 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1494 * memory buffer.
1495 */
1496static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1497 void **next_mr, void **end_mr, u32 idb_len,
1498 __be32 *idb_rkey)
1499{
1500 struct srp_target_port *target = ch->target;
1501 struct srp_device *dev = target->srp_host->srp_dev;
1502 struct srp_map_state state;
1503 struct srp_direct_buf idb_desc;
1504 u64 idb_pages[1];
f7f7aab1 1505 struct scatterlist idb_sg[1];
330179f2
BVA
1506 int ret;
1507
1508 memset(&state, 0, sizeof(state));
1509 memset(&idb_desc, 0, sizeof(idb_desc));
1510 state.gen.next = next_mr;
1511 state.gen.end = end_mr;
1512 state.desc = &idb_desc;
330179f2
BVA
1513 state.base_dma_addr = req->indirect_dma_addr;
1514 state.dma_len = idb_len;
f7f7aab1
SG
1515
1516 if (dev->use_fast_reg) {
1517 state.sg = idb_sg;
1518 state.sg_nents = 1;
1519 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1520 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1521 ret = srp_map_finish_fr(&state, ch);
1522 if (ret < 0)
1523 return ret;
1524 } else if (dev->use_fmr) {
1525 state.pages = idb_pages;
1526 state.pages[0] = (req->indirect_dma_addr &
1527 dev->mr_page_mask);
1528 state.npages = 1;
1529 ret = srp_map_finish_fmr(&state, ch);
1530 if (ret < 0)
1531 return ret;
1532 } else {
1533 return -EINVAL;
1534 }
330179f2
BVA
1535
1536 *idb_rkey = idb_desc.key;
1537
f7f7aab1 1538 return 0;
330179f2
BVA
1539}
1540
509c07bc 1541static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1542 struct srp_request *req)
1543{
509c07bc 1544 struct srp_target_port *target = ch->target;
76bc1e1d 1545 struct scatterlist *scat;
aef9ec39 1546 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1547 int len, nents, count, ret;
85507bcc
RC
1548 struct srp_device *dev;
1549 struct ib_device *ibdev;
8f26c9ff
DD
1550 struct srp_map_state state;
1551 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1552 u32 idb_len, table_len;
1553 __be32 idb_rkey;
8f26c9ff 1554 u8 fmt;
aef9ec39 1555
bb350d1d 1556 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1557 return sizeof (struct srp_cmd);
1558
1559 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1560 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1561 shost_printk(KERN_WARNING, target->scsi_host,
1562 PFX "Unhandled data direction %d\n",
1563 scmnd->sc_data_direction);
aef9ec39
RD
1564 return -EINVAL;
1565 }
1566
bb350d1d
FT
1567 nents = scsi_sg_count(scmnd);
1568 scat = scsi_sglist(scmnd);
aef9ec39 1569
05321937 1570 dev = target->srp_host->srp_dev;
85507bcc
RC
1571 ibdev = dev->dev;
1572
1573 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1574 if (unlikely(count == 0))
1575 return -EIO;
f5358a17
RD
1576
1577 fmt = SRP_DATA_DESC_DIRECT;
1578 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1579
03f6fb93 1580 if (count == 1 && target->global_mr) {
f5358a17
RD
1581 /*
1582 * The midlayer only generated a single gather/scatter
1583 * entry, or DMA mapping coalesced everything to a
1584 * single entry. So a direct descriptor along with
1585 * the DMA MR suffices.
1586 */
cf368713 1587 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1588
85507bcc 1589 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
03f6fb93 1590 buf->key = cpu_to_be32(target->global_mr->rkey);
85507bcc 1591 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1592
52ede08f 1593 req->nmdesc = 0;
8f26c9ff
DD
1594 goto map_complete;
1595 }
1596
5cfb1782
BVA
1597 /*
1598 * We have more than one scatter/gather entry, so build our indirect
1599 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1600 */
1601 indirect_hdr = (void *) cmd->add_data;
1602
c07d424d
DD
1603 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1604 target->indirect_size, DMA_TO_DEVICE);
1605
8f26c9ff 1606 memset(&state, 0, sizeof(state));
26630e8a
SG
1607 if (dev->use_fast_reg)
1608 srp_map_sg_fr(&state, ch, req, scat, count);
1609 else if (dev->use_fmr)
1610 srp_map_sg_fmr(&state, ch, req, scat, count);
1611 else
1612 srp_map_sg_dma(&state, ch, req, scat, count);
cf368713 1613
c07d424d
DD
1614 /* We've mapped the request, now pull as much of the indirect
1615 * descriptor table as we can into the command buffer. If this
1616 * target is not using an external indirect table, we are
1617 * guaranteed to fit into the command, as the SCSI layer won't
1618 * give us more S/G entries than we allow.
8f26c9ff 1619 */
8f26c9ff 1620 if (state.ndesc == 1) {
5cfb1782
BVA
1621 /*
1622 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1623 * so use a direct descriptor.
1624 */
1625 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1626
c07d424d 1627 *buf = req->indirect_desc[0];
8f26c9ff 1628 goto map_complete;
aef9ec39
RD
1629 }
1630
c07d424d
DD
1631 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1632 !target->allow_ext_sg)) {
1633 shost_printk(KERN_ERR, target->scsi_host,
1634 "Could not fit S/G list into SRP_CMD\n");
1635 return -EIO;
1636 }
1637
1638 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1639 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1640 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1641
1642 fmt = SRP_DATA_DESC_INDIRECT;
1643 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1644 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1645
c07d424d
DD
1646 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1647 count * sizeof (struct srp_direct_buf));
8f26c9ff 1648
03f6fb93 1649 if (!target->global_mr) {
330179f2
BVA
1650 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1651 idb_len, &idb_rkey);
1652 if (ret < 0)
1653 return ret;
1654 req->nmdesc++;
1655 } else {
03f6fb93 1656 idb_rkey = target->global_mr->rkey;
330179f2
BVA
1657 }
1658
c07d424d 1659 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1660 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1661 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1662 indirect_hdr->len = cpu_to_be32(state.total_len);
1663
1664 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1665 cmd->data_out_desc_cnt = count;
8f26c9ff 1666 else
c07d424d
DD
1667 cmd->data_in_desc_cnt = count;
1668
1669 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1670 DMA_TO_DEVICE);
8f26c9ff
DD
1671
1672map_complete:
aef9ec39
RD
1673 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1674 cmd->buf_fmt = fmt << 4;
1675 else
1676 cmd->buf_fmt = fmt;
1677
aef9ec39
RD
1678 return len;
1679}
1680
76c75b25
BVA
1681/*
1682 * Return an IU and possible credit to the free pool
1683 */
509c07bc 1684static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1685 enum srp_iu_type iu_type)
1686{
1687 unsigned long flags;
1688
509c07bc
BVA
1689 spin_lock_irqsave(&ch->lock, flags);
1690 list_add(&iu->list, &ch->free_tx);
76c75b25 1691 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1692 ++ch->req_lim;
1693 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1694}
1695
05a1d750 1696/*
509c07bc 1697 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1698 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1699 *
1700 * Note:
1701 * An upper limit for the number of allocated information units for each
1702 * request type is:
1703 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1704 * more than Scsi_Host.can_queue requests.
1705 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1706 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1707 * one unanswered SRP request to an initiator.
1708 */
509c07bc 1709static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1710 enum srp_iu_type iu_type)
1711{
509c07bc 1712 struct srp_target_port *target = ch->target;
05a1d750
DD
1713 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1714 struct srp_iu *iu;
1715
509c07bc 1716 srp_send_completion(ch->send_cq, ch);
05a1d750 1717
509c07bc 1718 if (list_empty(&ch->free_tx))
05a1d750
DD
1719 return NULL;
1720
1721 /* Initiator responses to target requests do not consume credits */
76c75b25 1722 if (iu_type != SRP_IU_RSP) {
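/*
 * rsv keeps SRP_TSK_MGMT_SQ_SIZE credits in reserve so that SRP_IU_CMD
 * allocations can never consume the credits needed to send a task
 * management request; SRP_IU_TSK_MGMT allocations themselves run with
 * rsv == 0.
 */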
509c07bc 1723 if (ch->req_lim <= rsv) {
76c75b25
BVA
1724 ++target->zero_req_lim;
1725 return NULL;
1726 }
1727
509c07bc 1728 --ch->req_lim;
05a1d750
DD
1729 }
1730
509c07bc 1731 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1732 list_del(&iu->list);
05a1d750
DD
1733 return iu;
1734}
1735
509c07bc 1736static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1737{
509c07bc 1738 struct srp_target_port *target = ch->target;
05a1d750
DD
1739 struct ib_sge list;
1740 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1741
1742 list.addr = iu->dma;
1743 list.length = len;
9af76271 1744 list.lkey = target->lkey;
05a1d750
DD
1745
1746 wr.next = NULL;
dcb4cb85 1747 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1748 wr.sg_list = &list;
1749 wr.num_sge = 1;
1750 wr.opcode = IB_WR_SEND;
1751 wr.send_flags = IB_SEND_SIGNALED;
1752
509c07bc 1753 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1754}
1755
509c07bc 1756static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1757{
509c07bc 1758 struct srp_target_port *target = ch->target;
c996bb47 1759 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1760 struct ib_sge list;
c996bb47
BVA
1761
1762 list.addr = iu->dma;
1763 list.length = iu->size;
9af76271 1764 list.lkey = target->lkey;
c996bb47
BVA
1765
1766 wr.next = NULL;
dcb4cb85 1767 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1768 wr.sg_list = &list;
1769 wr.num_sge = 1;
1770
509c07bc 1771 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1772}
1773
509c07bc 1774static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1775{
509c07bc 1776 struct srp_target_port *target = ch->target;
aef9ec39
RD
1777 struct srp_request *req;
1778 struct scsi_cmnd *scmnd;
1779 unsigned long flags;
aef9ec39 1780
aef9ec39 1781 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1782 spin_lock_irqsave(&ch->lock, flags);
1783 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1784 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1785
509c07bc 1786 ch->tsk_mgmt_status = -1;
f8b6e31e 1787 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1788 ch->tsk_mgmt_status = rsp->data[3];
1789 complete(&ch->tsk_mgmt_done);
aef9ec39 1790 } else {
77f2c1a4
BVA
1791 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1792 if (scmnd) {
1793 req = (void *)scmnd->host_scribble;
1794 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1795 }
22032991 1796 if (!scmnd) {
7aa54bd7 1797 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1798 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1799 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1800
509c07bc
BVA
1801 spin_lock_irqsave(&ch->lock, flags);
1802 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1803 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1804
1805 return;
1806 }
aef9ec39
RD
1807 scmnd->result = rsp->status;
1808
1809 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1810 memcpy(scmnd->sense_buffer, rsp->data +
1811 be32_to_cpu(rsp->resp_data_len),
1812 min_t(int, be32_to_cpu(rsp->sense_data_len),
1813 SCSI_SENSE_BUFFERSIZE));
1814 }
1815
e714531a 1816 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1817 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1818 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1819 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1820 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1821 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1822 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1823 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1824
509c07bc 1825 srp_free_req(ch, req, scmnd,
22032991
BVA
1826 be32_to_cpu(rsp->req_lim_delta));
1827
f8b6e31e
DD
1828 scmnd->host_scribble = NULL;
1829 scmnd->scsi_done(scmnd);
aef9ec39 1830 }
aef9ec39
RD
1831}
1832
509c07bc 1833static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1834 void *rsp, int len)
1835{
509c07bc 1836 struct srp_target_port *target = ch->target;
76c75b25 1837 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1838 unsigned long flags;
1839 struct srp_iu *iu;
76c75b25 1840 int err;
bb12588a 1841
509c07bc
BVA
1842 spin_lock_irqsave(&ch->lock, flags);
1843 ch->req_lim += req_delta;
1844 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1845 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1846
bb12588a
DD
1847 if (!iu) {
1848 shost_printk(KERN_ERR, target->scsi_host, PFX
1849 "no IU available to send response\n");
76c75b25 1850 return 1;
bb12588a
DD
1851 }
1852
1853 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1854 memcpy(iu->buf, rsp, len);
1855 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1856
509c07bc 1857 err = srp_post_send(ch, iu, len);
76c75b25 1858 if (err) {
bb12588a
DD
1859 shost_printk(KERN_ERR, target->scsi_host, PFX
1860 "unable to post response: %d\n", err);
509c07bc 1861 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1862 }
bb12588a 1863
bb12588a
DD
1864 return err;
1865}
1866
509c07bc 1867static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1868 struct srp_cred_req *req)
1869{
1870 struct srp_cred_rsp rsp = {
1871 .opcode = SRP_CRED_RSP,
1872 .tag = req->tag,
1873 };
1874 s32 delta = be32_to_cpu(req->req_lim_delta);
1875
509c07bc
BVA
1876 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1877 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1878 "problems processing SRP_CRED_REQ\n");
1879}
1880
509c07bc 1881static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1882 struct srp_aer_req *req)
1883{
509c07bc 1884 struct srp_target_port *target = ch->target;
bb12588a
DD
1885 struct srp_aer_rsp rsp = {
1886 .opcode = SRP_AER_RSP,
1887 .tag = req->tag,
1888 };
1889 s32 delta = be32_to_cpu(req->req_lim_delta);
1890
1891 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1892 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1893
509c07bc 1894 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1895 shost_printk(KERN_ERR, target->scsi_host, PFX
1896 "problems processing SRP_AER_REQ\n");
1897}
1898
509c07bc 1899static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1900{
509c07bc 1901 struct srp_target_port *target = ch->target;
dcb4cb85 1902 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1903 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1904 int res;
aef9ec39
RD
1905 u8 opcode;
1906
509c07bc 1907 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1908 DMA_FROM_DEVICE);
aef9ec39
RD
1909
1910 opcode = *(u8 *) iu->buf;
1911
1912 if (0) {
7aa54bd7
DD
1913 shost_printk(KERN_ERR, target->scsi_host,
1914 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1915 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1916 iu->buf, wc->byte_len, true);
aef9ec39
RD
1917 }
1918
1919 switch (opcode) {
1920 case SRP_RSP:
509c07bc 1921 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1922 break;
1923
bb12588a 1924 case SRP_CRED_REQ:
509c07bc 1925 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1926 break;
1927
1928 case SRP_AER_REQ:
509c07bc 1929 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1930 break;
1931
aef9ec39
RD
1932 case SRP_T_LOGOUT:
1933 /* XXX Handle target logout */
7aa54bd7
DD
1934 shost_printk(KERN_WARNING, target->scsi_host,
1935 PFX "Got target logout request\n");
aef9ec39
RD
1936 break;
1937
1938 default:
7aa54bd7
DD
1939 shost_printk(KERN_WARNING, target->scsi_host,
1940 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1941 break;
1942 }
1943
509c07bc 1944 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1945 DMA_FROM_DEVICE);
c996bb47 1946
509c07bc 1947 res = srp_post_recv(ch, iu);
c996bb47
BVA
1948 if (res != 0)
1949 shost_printk(KERN_ERR, target->scsi_host,
1950 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1951}
1952
c1120f89
BVA
1953/**
1954 * srp_tl_err_work() - handle a transport layer error
af24663b 1955 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1956 *
1957 * Note: This function may get invoked before the rport has been created,
1958 * hence the target->rport test.
1959 */
1960static void srp_tl_err_work(struct work_struct *work)
1961{
1962 struct srp_target_port *target;
1963
1964 target = container_of(work, struct srp_target_port, tl_err_work);
1965 if (target->rport)
1966 srp_start_tl_fail_timers(target->rport);
1967}
1968
5cfb1782 1969static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
7dad6b2e 1970 bool send_err, struct srp_rdma_ch *ch)
948d1e88 1971{
7dad6b2e
BVA
1972 struct srp_target_port *target = ch->target;
1973
1974 if (wr_id == SRP_LAST_WR_ID) {
1975 complete(&ch->done);
1976 return;
1977 }
1978
c014c8cd 1979 if (ch->connected && !target->qp_in_error) {
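/*
 * wr_id identifies what failed: LOCAL_INV and FAST_REG work requests
 * carry dedicated marker bits, while any other wr_id is a pointer to
 * the information unit of a failed send or receive.
 */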
5cfb1782
BVA
1980 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1981 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1982 "LOCAL_INV failed with status %s (%d)\n",
1983 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1984 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1985 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1986 "FAST_REG_MR failed status %s (%d)\n",
1987 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1988 } else {
1989 shost_printk(KERN_ERR, target->scsi_host,
57363d98 1990 PFX "failed %s status %s (%d) for iu %p\n",
5cfb1782 1991 send_err ? "send" : "receive",
57363d98
SG
1992 ib_wc_status_msg(wc_status), wc_status,
1993 (void *)(uintptr_t)wr_id);
5cfb1782 1994 }
c1120f89 1995 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 1996 }
948d1e88
BVA
1997 target->qp_in_error = true;
1998}
1999
509c07bc 2000static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 2001{
509c07bc 2002 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 2003 struct ib_wc wc;
aef9ec39
RD
2004
2005 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2006 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 2007 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 2008 srp_handle_recv(ch, &wc);
948d1e88 2009 } else {
7dad6b2e 2010 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
aef9ec39 2011 }
9c03dc9f
BVA
2012 }
2013}
2014
509c07bc 2015static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 2016{
509c07bc 2017 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 2018 struct ib_wc wc;
dcb4cb85 2019 struct srp_iu *iu;
9c03dc9f
BVA
2020
2021 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
2022 if (likely(wc.status == IB_WC_SUCCESS)) {
2023 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 2024 list_add(&iu->list, &ch->free_tx);
948d1e88 2025 } else {
7dad6b2e 2026 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
9c03dc9f 2027 }
aef9ec39
RD
2028 }
2029}
2030
76c75b25 2031static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2032{
76c75b25 2033 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2034 struct srp_rport *rport = target->rport;
509c07bc 2035 struct srp_rdma_ch *ch;
aef9ec39
RD
2036 struct srp_request *req;
2037 struct srp_iu *iu;
2038 struct srp_cmd *cmd;
85507bcc 2039 struct ib_device *dev;
76c75b25 2040 unsigned long flags;
77f2c1a4
BVA
2041 u32 tag;
2042 u16 idx;
d1b4289e 2043 int len, ret;
a95cadb9
BVA
2044 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2045
2046 /*
2047 * The SCSI EH thread is the only context from which srp_queuecommand()
2048 * can get invoked for blocked devices (SDEV_BLOCK /
2049 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2050 * locking the rport mutex if invoked from inside the SCSI EH.
2051 */
2052 if (in_scsi_eh)
2053 mutex_lock(&rport->mutex);
aef9ec39 2054
d1b4289e
BVA
2055 scmnd->result = srp_chkready(target->rport);
2056 if (unlikely(scmnd->result))
2057 goto err;
2ce19e72 2058
77f2c1a4
BVA
2059 WARN_ON_ONCE(scmnd->request->tag < 0);
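/*
 * The blk-mq tag encodes both a hardware queue number and a per-queue
 * index: the former selects the RDMA channel and the latter the slot
 * in that channel's request ring.
 */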
2060 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2061 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2062 idx = blk_mq_unique_tag_to_tag(tag);
2063 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2064 dev_name(&shost->shost_gendev), tag, idx,
2065 target->req_ring_size);
509c07bc
BVA
2066
2067 spin_lock_irqsave(&ch->lock, flags);
2068 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2069 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2070
77f2c1a4
BVA
2071 if (!iu)
2072 goto err;
2073
2074 req = &ch->req_ring[idx];
05321937 2075 dev = target->srp_host->srp_dev->dev;
49248644 2076 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2077 DMA_TO_DEVICE);
aef9ec39 2078
f8b6e31e 2079 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2080
2081 cmd = iu->buf;
2082 memset(cmd, 0, sizeof *cmd);
2083
2084 cmd->opcode = SRP_CMD;
985aa495 2085 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2086 cmd->tag = tag;
aef9ec39
RD
2087 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2088
aef9ec39
RD
2089 req->scmnd = scmnd;
2090 req->cmd = iu;
aef9ec39 2091
509c07bc 2092 len = srp_map_data(scmnd, ch, req);
aef9ec39 2093 if (len < 0) {
7aa54bd7 2094 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2095 PFX "Failed to map data (%d)\n", len);
2096 /*
2097 * If we ran out of memory descriptors (-ENOMEM) because an
2098 * application is queuing many requests with more than
52ede08f 2099 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2100 * to reduce queue depth temporarily.
2101 */
2102 scmnd->result = len == -ENOMEM ?
2103 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2104 goto err_iu;
aef9ec39
RD
2105 }
2106
49248644 2107 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2108 DMA_TO_DEVICE);
aef9ec39 2109
509c07bc 2110 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2111 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2112 goto err_unmap;
2113 }
2114
d1b4289e
BVA
2115 ret = 0;
2116
a95cadb9
BVA
2117unlock_rport:
2118 if (in_scsi_eh)
2119 mutex_unlock(&rport->mutex);
2120
d1b4289e 2121 return ret;
aef9ec39
RD
2122
2123err_unmap:
509c07bc 2124 srp_unmap_data(scmnd, ch, req);
aef9ec39 2125
76c75b25 2126err_iu:
509c07bc 2127 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2128
024ca901
BVA
2129 /*
 2130 * Prevent the loops that iterate over the request ring from
 2131 * encountering a dangling SCSI command pointer.
2132 */
2133 req->scmnd = NULL;
2134
d1b4289e
BVA
2135err:
2136 if (scmnd->result) {
2137 scmnd->scsi_done(scmnd);
2138 ret = 0;
2139 } else {
2140 ret = SCSI_MLQUEUE_HOST_BUSY;
2141 }
a95cadb9 2142
d1b4289e 2143 goto unlock_rport;
aef9ec39
RD
2144}
2145
4d73f95f
BVA
2146/*
2147 * Note: the resources allocated in this function are freed in
509c07bc 2148 * srp_free_ch_ib().
4d73f95f 2149 */
509c07bc 2150static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2151{
509c07bc 2152 struct srp_target_port *target = ch->target;
aef9ec39
RD
2153 int i;
2154
509c07bc
BVA
2155 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2156 GFP_KERNEL);
2157 if (!ch->rx_ring)
4d73f95f 2158 goto err_no_ring;
509c07bc
BVA
2159 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2160 GFP_KERNEL);
2161 if (!ch->tx_ring)
4d73f95f
BVA
2162 goto err_no_ring;
2163
2164 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2165 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2166 ch->max_ti_iu_len,
2167 GFP_KERNEL, DMA_FROM_DEVICE);
2168 if (!ch->rx_ring[i])
aef9ec39
RD
2169 goto err;
2170 }
2171
4d73f95f 2172 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2173 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2174 target->max_iu_len,
2175 GFP_KERNEL, DMA_TO_DEVICE);
2176 if (!ch->tx_ring[i])
aef9ec39 2177 goto err;
dcb4cb85 2178
509c07bc 2179 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2180 }
2181
2182 return 0;
2183
2184err:
4d73f95f 2185 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2186 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2187 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2188 }
2189
4d73f95f
BVA
2190
2191err_no_ring:
509c07bc
BVA
2192 kfree(ch->tx_ring);
2193 ch->tx_ring = NULL;
2194 kfree(ch->rx_ring);
2195 ch->rx_ring = NULL;
4d73f95f 2196
aef9ec39
RD
2197 return -ENOMEM;
2198}
2199
c9b03c1a
BVA
2200static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2201{
2202 uint64_t T_tr_ns, max_compl_time_ms;
2203 uint32_t rq_tmo_jiffies;
2204
2205 /*
2206 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2207 * table 91), both the QP timeout and the retry count have to be set
 2208 * for RC QPs during the RTR to RTS transition.
2209 */
2210 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2211 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2212
2213 /*
2214 * Set target->rq_tmo_jiffies to one second more than the largest time
2215 * it can take before an error completion is generated. See also
2216 * C9-140..142 in the IBTA spec for more information about how to
2217 * convert the QP Local ACK Timeout value to nanoseconds.
2218 */
2219 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2220 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2221 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2222 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
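/*
 * For example, with a local ACK timeout of 19 and a retry count of 7,
 * T_tr = 4096 ns * 2^19 ~= 2.15 s, the worst-case completion time is
 * 7 * 4 * 2.15 s ~= 60 s, and the resulting request queue timeout
 * corresponds to roughly 61 seconds.
 */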
2223
2224 return rq_tmo_jiffies;
2225}
2226
961e0be8 2227static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2228 const struct srp_login_rsp *lrsp,
509c07bc 2229 struct srp_rdma_ch *ch)
961e0be8 2230{
509c07bc 2231 struct srp_target_port *target = ch->target;
961e0be8
DD
2232 struct ib_qp_attr *qp_attr = NULL;
2233 int attr_mask = 0;
2234 int ret;
2235 int i;
2236
2237 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2238 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2239 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2240
2241 /*
2242 * Reserve credits for task management so we don't
2243 * bounce requests back to the SCSI mid-layer.
2244 */
2245 target->scsi_host->can_queue
509c07bc 2246 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2247 target->scsi_host->can_queue);
4d73f95f
BVA
2248 target->scsi_host->cmd_per_lun
2249 = min_t(int, target->scsi_host->can_queue,
2250 target->scsi_host->cmd_per_lun);
961e0be8
DD
2251 } else {
2252 shost_printk(KERN_WARNING, target->scsi_host,
2253 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2254 ret = -ECONNRESET;
2255 goto error;
2256 }
2257
509c07bc
BVA
2258 if (!ch->rx_ring) {
2259 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2260 if (ret)
2261 goto error;
2262 }
2263
2264 ret = -ENOMEM;
2265 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2266 if (!qp_attr)
2267 goto error;
2268
2269 qp_attr->qp_state = IB_QPS_RTR;
2270 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2271 if (ret)
2272 goto error_free;
2273
509c07bc 2274 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2275 if (ret)
2276 goto error_free;
2277
4d73f95f 2278 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2279 struct srp_iu *iu = ch->rx_ring[i];
2280
2281 ret = srp_post_recv(ch, iu);
961e0be8
DD
2282 if (ret)
2283 goto error_free;
2284 }
2285
2286 qp_attr->qp_state = IB_QPS_RTS;
2287 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2288 if (ret)
2289 goto error_free;
2290
c9b03c1a
BVA
2291 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2292
509c07bc 2293 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2294 if (ret)
2295 goto error_free;
2296
2297 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2298
2299error_free:
2300 kfree(qp_attr);
2301
2302error:
509c07bc 2303 ch->status = ret;
961e0be8
DD
2304}
2305
aef9ec39
RD
2306static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2307 struct ib_cm_event *event,
509c07bc 2308 struct srp_rdma_ch *ch)
aef9ec39 2309{
509c07bc 2310 struct srp_target_port *target = ch->target;
7aa54bd7 2311 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2312 struct ib_class_port_info *cpi;
2313 int opcode;
2314
2315 switch (event->param.rej_rcvd.reason) {
2316 case IB_CM_REJ_PORT_CM_REDIRECT:
2317 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2318 ch->path.dlid = cpi->redirect_lid;
2319 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2320 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2321 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2322
509c07bc 2323 ch->status = ch->path.dlid ?
aef9ec39
RD
2324 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2325 break;
2326
2327 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2328 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2329 /*
2330 * Topspin/Cisco SRP gateways incorrectly send
2331 * reject reason code 25 when they mean 24
2332 * (port redirect).
2333 */
509c07bc 2334 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2335 event->param.rej_rcvd.ari, 16);
2336
7aa54bd7
DD
2337 shost_printk(KERN_DEBUG, shost,
2338 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2339 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2340 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2341
509c07bc 2342 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2343 } else {
7aa54bd7
DD
2344 shost_printk(KERN_WARNING, shost,
2345 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2346 ch->status = -ECONNRESET;
aef9ec39
RD
2347 }
2348 break;
2349
2350 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2351 shost_printk(KERN_WARNING, shost,
2352 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2353 ch->status = -ECONNRESET;
aef9ec39
RD
2354 break;
2355
2356 case IB_CM_REJ_CONSUMER_DEFINED:
2357 opcode = *(u8 *) event->private_data;
2358 if (opcode == SRP_LOGIN_REJ) {
2359 struct srp_login_rej *rej = event->private_data;
2360 u32 reason = be32_to_cpu(rej->reason);
2361
2362 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2363 shost_printk(KERN_WARNING, shost,
2364 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2365 else
e7ffde01
BVA
2366 shost_printk(KERN_WARNING, shost, PFX
2367 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2368 target->sgid.raw,
2369 target->orig_dgid.raw, reason);
aef9ec39 2370 } else
7aa54bd7
DD
2371 shost_printk(KERN_WARNING, shost,
2372 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2373 " opcode 0x%02x\n", opcode);
509c07bc 2374 ch->status = -ECONNRESET;
aef9ec39
RD
2375 break;
2376
9fe4bcf4
DD
2377 case IB_CM_REJ_STALE_CONN:
2378 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2379 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2380 break;
2381
aef9ec39 2382 default:
7aa54bd7
DD
2383 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2384 event->param.rej_rcvd.reason);
509c07bc 2385 ch->status = -ECONNRESET;
aef9ec39
RD
2386 }
2387}
2388
2389static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2390{
509c07bc
BVA
2391 struct srp_rdma_ch *ch = cm_id->context;
2392 struct srp_target_port *target = ch->target;
aef9ec39 2393 int comp = 0;
aef9ec39
RD
2394
2395 switch (event->event) {
2396 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2397 shost_printk(KERN_DEBUG, target->scsi_host,
2398 PFX "Sending CM REQ failed\n");
aef9ec39 2399 comp = 1;
509c07bc 2400 ch->status = -ECONNRESET;
aef9ec39
RD
2401 break;
2402
2403 case IB_CM_REP_RECEIVED:
2404 comp = 1;
509c07bc 2405 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2406 break;
2407
2408 case IB_CM_REJ_RECEIVED:
7aa54bd7 2409 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2410 comp = 1;
2411
509c07bc 2412 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2413 break;
2414
b7ac4ab4 2415 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2416 shost_printk(KERN_WARNING, target->scsi_host,
2417 PFX "DREQ received - connection closed\n");
c014c8cd 2418 ch->connected = false;
b7ac4ab4 2419 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2420 shost_printk(KERN_ERR, target->scsi_host,
2421 PFX "Sending CM DREP failed\n");
c1120f89 2422 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2423 break;
2424
2425 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2426 shost_printk(KERN_ERR, target->scsi_host,
2427 PFX "connection closed\n");
ac72d766 2428 comp = 1;
aef9ec39 2429
509c07bc 2430 ch->status = 0;
aef9ec39
RD
2431 break;
2432
b7ac4ab4
IR
2433 case IB_CM_MRA_RECEIVED:
2434 case IB_CM_DREQ_ERROR:
2435 case IB_CM_DREP_RECEIVED:
2436 break;
2437
aef9ec39 2438 default:
7aa54bd7
DD
2439 shost_printk(KERN_WARNING, target->scsi_host,
2440 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2441 break;
2442 }
2443
2444 if (comp)
509c07bc 2445 complete(&ch->done);
aef9ec39 2446
aef9ec39
RD
2447 return 0;
2448}
2449
71444b97
JW
2450/**
2451 * srp_change_queue_depth - setting device queue depth
2452 * @sdev: scsi device struct
2453 * @qdepth: requested queue depth
71444b97
JW
2454 *
2455 * Returns queue depth.
2456 */
2457static int
db5ed4df 2458srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2459{
c40ecc12 2460 if (!sdev->tagged_supported)
1e6f2416 2461 qdepth = 1;
db5ed4df 2462 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2463}
2464
985aa495
BVA
2465static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2466 u8 func)
aef9ec39 2467{
509c07bc 2468 struct srp_target_port *target = ch->target;
a95cadb9 2469 struct srp_rport *rport = target->rport;
19081f31 2470 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2471 struct srp_iu *iu;
2472 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2473
c014c8cd 2474 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2475 return -1;
2476
509c07bc 2477 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2478
a95cadb9 2479 /*
509c07bc 2480 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2481 * invoked while a task management function is being sent.
2482 */
2483 mutex_lock(&rport->mutex);
509c07bc
BVA
2484 spin_lock_irq(&ch->lock);
2485 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2486 spin_unlock_irq(&ch->lock);
76c75b25 2487
a95cadb9
BVA
2488 if (!iu) {
2489 mutex_unlock(&rport->mutex);
2490
76c75b25 2491 return -1;
a95cadb9 2492 }
aef9ec39 2493
19081f31
DD
2494 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2495 DMA_TO_DEVICE);
aef9ec39
RD
2496 tsk_mgmt = iu->buf;
2497 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2498
2499 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2500 int_to_scsilun(lun, &tsk_mgmt->lun);
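/*
 * Setting the SRP_TAG_TSK_MGMT bit in the tag lets srp_process_rsp()
 * distinguish the response to this task management request from
 * responses to regular SCSI commands.
 */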
f8b6e31e 2501 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2502 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2503 tsk_mgmt->task_tag = req_tag;
aef9ec39 2504
19081f31
DD
2505 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2506 DMA_TO_DEVICE);
509c07bc
BVA
2507 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2508 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2509 mutex_unlock(&rport->mutex);
2510
76c75b25
BVA
2511 return -1;
2512 }
a95cadb9 2513 mutex_unlock(&rport->mutex);
d945e1df 2514
509c07bc 2515 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2516 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2517 return -1;
aef9ec39 2518
d945e1df 2519 return 0;
d945e1df
RD
2520}
2521
aef9ec39
RD
2522static int srp_abort(struct scsi_cmnd *scmnd)
2523{
d945e1df 2524 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2525 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2526 u32 tag;
d92c0da7 2527 u16 ch_idx;
509c07bc 2528 struct srp_rdma_ch *ch;
086f44f5 2529 int ret;
d945e1df 2530
7aa54bd7 2531 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2532
d92c0da7 2533 if (!req)
99b6697a 2534 return SUCCESS;
77f2c1a4 2535 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2536 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2537 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2538 return SUCCESS;
2539 ch = &target->ch[ch_idx];
2540 if (!srp_claim_req(ch, req, NULL, scmnd))
2541 return SUCCESS;
2542 shost_printk(KERN_ERR, target->scsi_host,
2543 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2544 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2545 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2546 ret = SUCCESS;
ed9b2264 2547 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2548 ret = FAST_IO_FAIL;
086f44f5
BVA
2549 else
2550 ret = FAILED;
509c07bc 2551 srp_free_req(ch, req, scmnd, 0);
22032991 2552 scmnd->result = DID_ABORT << 16;
d8536670 2553 scmnd->scsi_done(scmnd);
d945e1df 2554
086f44f5 2555 return ret;
aef9ec39
RD
2556}
2557
2558static int srp_reset_device(struct scsi_cmnd *scmnd)
2559{
d945e1df 2560 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2561 struct srp_rdma_ch *ch;
536ae14e 2562 int i, j;
d945e1df 2563
7aa54bd7 2564 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2565
d92c0da7 2566 ch = &target->ch[0];
509c07bc 2567 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2568 SRP_TSK_LUN_RESET))
d945e1df 2569 return FAILED;
509c07bc 2570 if (ch->tsk_mgmt_status)
d945e1df
RD
2571 return FAILED;
2572
d92c0da7
BVA
2573 for (i = 0; i < target->ch_count; i++) {
2574 ch = &target->ch[i];
 2575 for (j = 0; j < target->req_ring_size; ++j) {
 2576 struct srp_request *req = &ch->req_ring[j];
509c07bc 2577
d92c0da7
BVA
2578 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2579 }
536ae14e 2580 }
d945e1df 2581
d945e1df 2582 return SUCCESS;
aef9ec39
RD
2583}
2584
2585static int srp_reset_host(struct scsi_cmnd *scmnd)
2586{
2587 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2588
7aa54bd7 2589 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2590
ed9b2264 2591 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2592}
2593
c9b03c1a
BVA
2594static int srp_slave_configure(struct scsi_device *sdev)
2595{
2596 struct Scsi_Host *shost = sdev->host;
2597 struct srp_target_port *target = host_to_target(shost);
2598 struct request_queue *q = sdev->request_queue;
2599 unsigned long timeout;
2600
2601 if (sdev->type == TYPE_DISK) {
2602 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2603 blk_queue_rq_timeout(q, timeout);
2604 }
2605
2606 return 0;
2607}
2608
ee959b00
TJ
2609static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2610 char *buf)
6ecb0c84 2611{
ee959b00 2612 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2613
45c37cad 2614 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2615}
2616
ee959b00
TJ
2617static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2618 char *buf)
6ecb0c84 2619{
ee959b00 2620 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2621
45c37cad 2622 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2623}
2624
ee959b00
TJ
2625static ssize_t show_service_id(struct device *dev,
2626 struct device_attribute *attr, char *buf)
6ecb0c84 2627{
ee959b00 2628 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2629
45c37cad 2630 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2631}
2632
ee959b00
TJ
2633static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2634 char *buf)
6ecb0c84 2635{
ee959b00 2636 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2637
747fe000 2638 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2639}
2640
848b3082
BVA
2641static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2642 char *buf)
2643{
2644 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2645
747fe000 2646 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2647}
2648
ee959b00
TJ
2649static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2650 char *buf)
6ecb0c84 2651{
ee959b00 2652 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2653 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2654
509c07bc 2655 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2656}
2657
ee959b00
TJ
2658static ssize_t show_orig_dgid(struct device *dev,
2659 struct device_attribute *attr, char *buf)
3633b3d0 2660{
ee959b00 2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2662
747fe000 2663 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2664}
2665
89de7486
BVA
2666static ssize_t show_req_lim(struct device *dev,
2667 struct device_attribute *attr, char *buf)
2668{
2669 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2670 struct srp_rdma_ch *ch;
2671 int i, req_lim = INT_MAX;
89de7486 2672
d92c0da7
BVA
2673 for (i = 0; i < target->ch_count; i++) {
2674 ch = &target->ch[i];
2675 req_lim = min(req_lim, ch->req_lim);
2676 }
2677 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2678}
2679
ee959b00
TJ
2680static ssize_t show_zero_req_lim(struct device *dev,
2681 struct device_attribute *attr, char *buf)
6bfa24fa 2682{
ee959b00 2683 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2684
6bfa24fa
RD
2685 return sprintf(buf, "%d\n", target->zero_req_lim);
2686}
2687
ee959b00
TJ
2688static ssize_t show_local_ib_port(struct device *dev,
2689 struct device_attribute *attr, char *buf)
ded7f1a1 2690{
ee959b00 2691 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2692
2693 return sprintf(buf, "%d\n", target->srp_host->port);
2694}
2695
ee959b00
TJ
2696static ssize_t show_local_ib_device(struct device *dev,
2697 struct device_attribute *attr, char *buf)
ded7f1a1 2698{
ee959b00 2699 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2700
05321937 2701 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2702}
2703
d92c0da7
BVA
2704static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2705 char *buf)
2706{
2707 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2708
2709 return sprintf(buf, "%d\n", target->ch_count);
2710}
2711
4b5e5f41
BVA
2712static ssize_t show_comp_vector(struct device *dev,
2713 struct device_attribute *attr, char *buf)
2714{
2715 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2716
2717 return sprintf(buf, "%d\n", target->comp_vector);
2718}
2719
7bb312e4
VP
2720static ssize_t show_tl_retry_count(struct device *dev,
2721 struct device_attribute *attr, char *buf)
2722{
2723 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2724
2725 return sprintf(buf, "%d\n", target->tl_retry_count);
2726}
2727
49248644
DD
2728static ssize_t show_cmd_sg_entries(struct device *dev,
2729 struct device_attribute *attr, char *buf)
2730{
2731 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2732
2733 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2734}
2735
c07d424d
DD
2736static ssize_t show_allow_ext_sg(struct device *dev,
2737 struct device_attribute *attr, char *buf)
2738{
2739 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2740
2741 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2742}
2743
ee959b00
TJ
2744static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2745static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2746static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2747static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2748static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2749static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2750static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2751static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2752static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2753static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2754static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2755static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2756static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2757static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2758static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2759static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2760
2761static struct device_attribute *srp_host_attrs[] = {
2762 &dev_attr_id_ext,
2763 &dev_attr_ioc_guid,
2764 &dev_attr_service_id,
2765 &dev_attr_pkey,
848b3082 2766 &dev_attr_sgid,
ee959b00
TJ
2767 &dev_attr_dgid,
2768 &dev_attr_orig_dgid,
89de7486 2769 &dev_attr_req_lim,
ee959b00
TJ
2770 &dev_attr_zero_req_lim,
2771 &dev_attr_local_ib_port,
2772 &dev_attr_local_ib_device,
d92c0da7 2773 &dev_attr_ch_count,
4b5e5f41 2774 &dev_attr_comp_vector,
7bb312e4 2775 &dev_attr_tl_retry_count,
49248644 2776 &dev_attr_cmd_sg_entries,
c07d424d 2777 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2778 NULL
2779};
2780
aef9ec39
RD
2781static struct scsi_host_template srp_template = {
2782 .module = THIS_MODULE,
b7f008fd
RD
2783 .name = "InfiniBand SRP initiator",
2784 .proc_name = DRV_NAME,
c9b03c1a 2785 .slave_configure = srp_slave_configure,
aef9ec39
RD
2786 .info = srp_target_info,
2787 .queuecommand = srp_queuecommand,
71444b97 2788 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2789 .eh_abort_handler = srp_abort,
2790 .eh_device_reset_handler = srp_reset_device,
2791 .eh_host_reset_handler = srp_reset_host,
2742c1da 2792 .skip_settle_delay = true,
49248644 2793 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2794 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2795 .this_id = -1,
4d73f95f 2796 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2797 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4
BVA
2798 .shost_attrs = srp_host_attrs,
2799 .use_blk_tags = 1,
c40ecc12 2800 .track_queue_depth = 1,
aef9ec39
RD
2801};
2802
34aa654e
BVA
2803static int srp_sdev_count(struct Scsi_Host *host)
2804{
2805 struct scsi_device *sdev;
2806 int c = 0;
2807
2808 shost_for_each_device(sdev, host)
2809 c++;
2810
2811 return c;
2812}
2813
bc44bd1d
BVA
2814/*
2815 * Return values:
2816 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2817 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2818 * removal has been scheduled.
2819 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2820 */
aef9ec39
RD
2821static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2822{
3236822b
FT
2823 struct srp_rport_identifiers ids;
2824 struct srp_rport *rport;
2825
34aa654e 2826 target->state = SRP_TARGET_SCANNING;
aef9ec39 2827 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2828 be64_to_cpu(target->id_ext));
aef9ec39 2829
05321937 2830 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2831 return -ENODEV;
2832
3236822b
FT
2833 memcpy(ids.port_id, &target->id_ext, 8);
2834 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2835 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2836 rport = srp_rport_add(target->scsi_host, &ids);
2837 if (IS_ERR(rport)) {
2838 scsi_remove_host(target->scsi_host);
2839 return PTR_ERR(rport);
2840 }
2841
dc1bdbd9 2842 rport->lld_data = target;
9dd69a60 2843 target->rport = rport;
dc1bdbd9 2844
b3589fd4 2845 spin_lock(&host->target_lock);
aef9ec39 2846 list_add_tail(&target->list, &host->target_list);
b3589fd4 2847 spin_unlock(&host->target_lock);
aef9ec39 2848
aef9ec39 2849 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2850 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2851
c014c8cd
BVA
2852 if (srp_connected_ch(target) < target->ch_count ||
2853 target->qp_in_error) {
34aa654e
BVA
2854 shost_printk(KERN_INFO, target->scsi_host,
2855 PFX "SCSI scan failed - removing SCSI host\n");
2856 srp_queue_remove_work(target);
2857 goto out;
2858 }
2859
2860 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2861 dev_name(&target->scsi_host->shost_gendev),
2862 srp_sdev_count(target->scsi_host));
2863
2864 spin_lock_irq(&target->lock);
2865 if (target->state == SRP_TARGET_SCANNING)
2866 target->state = SRP_TARGET_LIVE;
2867 spin_unlock_irq(&target->lock);
2868
2869out:
aef9ec39
RD
2870 return 0;
2871}
2872
ee959b00 2873static void srp_release_dev(struct device *dev)
aef9ec39
RD
2874{
2875 struct srp_host *host =
ee959b00 2876 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2877
2878 complete(&host->released);
2879}
2880
2881static struct class srp_class = {
2882 .name = "infiniband_srp",
ee959b00 2883 .dev_release = srp_release_dev
aef9ec39
RD
2884};
2885
96fc248a
BVA
2886/**
2887 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2888 * @host: SRP host.
2889 * @target: SRP target port.
96fc248a
BVA
2890 */
2891static bool srp_conn_unique(struct srp_host *host,
2892 struct srp_target_port *target)
2893{
2894 struct srp_target_port *t;
2895 bool ret = false;
2896
2897 if (target->state == SRP_TARGET_REMOVED)
2898 goto out;
2899
2900 ret = true;
2901
2902 spin_lock(&host->target_lock);
2903 list_for_each_entry(t, &host->target_list, list) {
2904 if (t != target &&
2905 target->id_ext == t->id_ext &&
2906 target->ioc_guid == t->ioc_guid &&
2907 target->initiator_ext == t->initiator_ext) {
2908 ret = false;
2909 break;
2910 }
2911 }
2912 spin_unlock(&host->target_lock);
2913
2914out:
2915 return ret;
2916}
2917
aef9ec39
RD
2918/*
2919 * Target ports are added by writing
2920 *
2921 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2922 * pkey=<P_Key>,service_id=<service ID>
2923 *
2924 * to the add_target sysfs attribute.
2925 */
2926enum {
2927 SRP_OPT_ERR = 0,
2928 SRP_OPT_ID_EXT = 1 << 0,
2929 SRP_OPT_IOC_GUID = 1 << 1,
2930 SRP_OPT_DGID = 1 << 2,
2931 SRP_OPT_PKEY = 1 << 3,
2932 SRP_OPT_SERVICE_ID = 1 << 4,
2933 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2934 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2935 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2936 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2937 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2938 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2939 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2940 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2941 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2942 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2943 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2944 SRP_OPT_IOC_GUID |
2945 SRP_OPT_DGID |
2946 SRP_OPT_PKEY |
2947 SRP_OPT_SERVICE_ID),
2948};
2949
a447c093 2950static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2951 { SRP_OPT_ID_EXT, "id_ext=%s" },
2952 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2953 { SRP_OPT_DGID, "dgid=%s" },
2954 { SRP_OPT_PKEY, "pkey=%x" },
2955 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2956 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2957 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2958 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2959 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2960 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2961 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2962 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2963 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2964 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2965 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2966 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2967};
2968
2969static int srp_parse_options(const char *buf, struct srp_target_port *target)
2970{
2971 char *options, *sep_opt;
2972 char *p;
2973 char dgid[3];
2974 substring_t args[MAX_OPT_ARGS];
2975 int opt_mask = 0;
2976 int token;
2977 int ret = -EINVAL;
2978 int i;
2979
2980 options = kstrdup(buf, GFP_KERNEL);
2981 if (!options)
2982 return -ENOMEM;
2983
2984 sep_opt = options;
7dcf9c19 2985 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2986 if (!*p)
2987 continue;
2988
2989 token = match_token(p, srp_opt_tokens, args);
2990 opt_mask |= token;
2991
2992 switch (token) {
2993 case SRP_OPT_ID_EXT:
2994 p = match_strdup(args);
a20f3a6d
IR
2995 if (!p) {
2996 ret = -ENOMEM;
2997 goto out;
2998 }
aef9ec39
RD
2999 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3000 kfree(p);
3001 break;
3002
3003 case SRP_OPT_IOC_GUID:
3004 p = match_strdup(args);
a20f3a6d
IR
3005 if (!p) {
3006 ret = -ENOMEM;
3007 goto out;
3008 }
aef9ec39
RD
3009 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3010 kfree(p);
3011 break;
3012
3013 case SRP_OPT_DGID:
3014 p = match_strdup(args);
a20f3a6d
IR
3015 if (!p) {
3016 ret = -ENOMEM;
3017 goto out;
3018 }
aef9ec39 3019 if (strlen(p) != 32) {
e0bda7d8 3020 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3021 kfree(p);
aef9ec39
RD
3022 goto out;
3023 }
3024
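/*
 * The destination GID is given as 32 hex characters; parse it two
 * characters (one byte) at a time into orig_dgid.raw[].
 */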
3025 for (i = 0; i < 16; ++i) {
747fe000
BVA
3026 strlcpy(dgid, p + i * 2, sizeof(dgid));
3027 if (sscanf(dgid, "%hhx",
3028 &target->orig_dgid.raw[i]) < 1) {
3029 ret = -EINVAL;
3030 kfree(p);
3031 goto out;
3032 }
aef9ec39 3033 }
bf17c1c7 3034 kfree(p);
aef9ec39
RD
3035 break;
3036
3037 case SRP_OPT_PKEY:
3038 if (match_hex(args, &token)) {
e0bda7d8 3039 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3040 goto out;
3041 }
747fe000 3042 target->pkey = cpu_to_be16(token);
aef9ec39
RD
3043 break;
3044
3045 case SRP_OPT_SERVICE_ID:
3046 p = match_strdup(args);
a20f3a6d
IR
3047 if (!p) {
3048 ret = -ENOMEM;
3049 goto out;
3050 }
aef9ec39
RD
3051 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3052 kfree(p);
3053 break;
3054
3055 case SRP_OPT_MAX_SECT:
3056 if (match_int(args, &token)) {
e0bda7d8 3057 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3058 goto out;
3059 }
3060 target->scsi_host->max_sectors = token;
3061 break;
3062
4d73f95f
BVA
3063 case SRP_OPT_QUEUE_SIZE:
3064 if (match_int(args, &token) || token < 1) {
3065 pr_warn("bad queue_size parameter '%s'\n", p);
3066 goto out;
3067 }
3068 target->scsi_host->can_queue = token;
3069 target->queue_size = token + SRP_RSP_SQ_SIZE +
3070 SRP_TSK_MGMT_SQ_SIZE;
3071 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3072 target->scsi_host->cmd_per_lun = token;
3073 break;
3074
52fb2b50 3075 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3076 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3077 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3078 p);
52fb2b50
VP
3079 goto out;
3080 }
4d73f95f 3081 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3082 break;
3083
0c0450db
R
3084 case SRP_OPT_IO_CLASS:
3085 if (match_hex(args, &token)) {
e0bda7d8 3086 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3087 goto out;
3088 }
3089 if (token != SRP_REV10_IB_IO_CLASS &&
3090 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3091 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3092 token, SRP_REV10_IB_IO_CLASS,
3093 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3094 goto out;
3095 }
3096 target->io_class = token;
3097 break;
3098
01cb9bcb
IR
3099 case SRP_OPT_INITIATOR_EXT:
3100 p = match_strdup(args);
a20f3a6d
IR
3101 if (!p) {
3102 ret = -ENOMEM;
3103 goto out;
3104 }
01cb9bcb
IR
3105 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3106 kfree(p);
3107 break;
3108
49248644
DD
3109 case SRP_OPT_CMD_SG_ENTRIES:
3110 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3111 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3112 p);
49248644
DD
3113 goto out;
3114 }
3115 target->cmd_sg_cnt = token;
3116 break;
3117
c07d424d
DD
3118 case SRP_OPT_ALLOW_EXT_SG:
3119 if (match_int(args, &token)) {
e0bda7d8 3120 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3121 goto out;
3122 }
3123 target->allow_ext_sg = !!token;
3124 break;
3125
3126 case SRP_OPT_SG_TABLESIZE:
3127 if (match_int(args, &token) || token < 1 ||
3128 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3129 pr_warn("bad max sg_tablesize parameter '%s'\n",
3130 p);
c07d424d
DD
3131 goto out;
3132 }
3133 target->sg_tablesize = token;
3134 break;
3135
4b5e5f41
BVA
3136 case SRP_OPT_COMP_VECTOR:
3137 if (match_int(args, &token) || token < 0) {
3138 pr_warn("bad comp_vector parameter '%s'\n", p);
3139 goto out;
3140 }
3141 target->comp_vector = token;
3142 break;
3143
7bb312e4
VP
3144 case SRP_OPT_TL_RETRY_COUNT:
3145 if (match_int(args, &token) || token < 2 || token > 7) {
3146 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3147 p);
3148 goto out;
3149 }
3150 target->tl_retry_count = token;
3151 break;
3152
aef9ec39 3153 default:
e0bda7d8
BVA
3154 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3155 p);
aef9ec39
RD
3156 goto out;
3157 }
3158 }
3159
3160 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3161 ret = 0;
3162 else
3163 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3164 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3165 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3166 pr_warn("target creation request is missing parameter '%s'\n",
3167 srp_opt_tokens[i].pattern);
aef9ec39 3168
4d73f95f
BVA
3169 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3170 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3171 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3172 target->scsi_host->cmd_per_lun,
3173 target->scsi_host->can_queue);
3174
aef9ec39
RD
3175out:
3176 kfree(options);
3177 return ret;
3178}
3179
ee959b00
TJ
3180static ssize_t srp_create_target(struct device *dev,
3181 struct device_attribute *attr,
aef9ec39
RD
3182 const char *buf, size_t count)
3183{
3184 struct srp_host *host =
ee959b00 3185 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3186 struct Scsi_Host *target_host;
3187 struct srp_target_port *target;
509c07bc 3188 struct srp_rdma_ch *ch;
d1b4289e
BVA
3189 struct srp_device *srp_dev = host->srp_dev;
3190 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3191 int ret, node_idx, node, cpu, i;
3192 bool multich = false;
aef9ec39
RD
3193
3194 target_host = scsi_host_alloc(&srp_template,
3195 sizeof (struct srp_target_port));
3196 if (!target_host)
3197 return -ENOMEM;
3198
49248644 3199 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3200 target_host->max_channel = 0;
3201 target_host->max_id = 1;
985aa495 3202 target_host->max_lun = -1LL;
3c8edf0e 3203 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3204
aef9ec39 3205 target = host_to_target(target_host);
aef9ec39 3206
49248644
DD
3207 target->io_class = SRP_REV16A_IB_IO_CLASS;
3208 target->scsi_host = target_host;
3209 target->srp_host = host;
e6bf5f48 3210 target->lkey = host->srp_dev->pd->local_dma_lkey;
03f6fb93 3211 target->global_mr = host->srp_dev->global_mr;
49248644 3212 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3213 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3214 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3215 target->tl_retry_count = 7;
4d73f95f 3216 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3217
34aa654e
BVA
3218 /*
 3219 * Prevent the SCSI host from being removed by srp_remove_target()
3220 * before this function returns.
3221 */
3222 scsi_host_get(target->scsi_host);
3223
2d7091bc
BVA
3224 mutex_lock(&host->add_target_mutex);
3225
aef9ec39
RD
3226 ret = srp_parse_options(buf, target);
3227 if (ret)
fb49c8bb 3228 goto out;
aef9ec39 3229
77f2c1a4
BVA
3230 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3231 if (ret)
fb49c8bb 3232 goto out;
77f2c1a4 3233
4d73f95f
BVA
3234 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3235
96fc248a
BVA
3236 if (!srp_conn_unique(target->srp_host, target)) {
3237 shost_printk(KERN_INFO, target->scsi_host,
3238 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3239 be64_to_cpu(target->id_ext),
3240 be64_to_cpu(target->ioc_guid),
3241 be64_to_cpu(target->initiator_ext));
3242 ret = -EEXIST;
fb49c8bb 3243 goto out;
96fc248a
BVA
3244 }
3245
5cfb1782 3246 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3247 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3248 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3249 target->sg_tablesize = target->cmd_sg_cnt;
3250 }
3251
3252 target_host->sg_tablesize = target->sg_tablesize;
3253 target->indirect_size = target->sg_tablesize *
3254 sizeof (struct srp_direct_buf);
49248644
DD
3255 target->max_iu_len = sizeof (struct srp_cmd) +
3256 sizeof (struct srp_indirect_buf) +
3257 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3258
c1120f89 3259 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3260 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3261 spin_lock_init(&target->lock);
55ee3ab2 3262 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3263 if (ret)
fb49c8bb 3264 goto out;
aef9ec39 3265
d92c0da7
BVA
3266 ret = -ENOMEM;
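/*
 * Pick the number of RDMA channels: ch_count if it was set, otherwise
 * up to four channels per online NUMA node, bounded by the number of
 * completion vectors and of online CPUs, and never fewer than one
 * channel per online node.
 */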
3267 target->ch_count = max_t(unsigned, num_online_nodes(),
3268 min(ch_count ? :
3269 min(4 * num_online_nodes(),
3270 ibdev->num_comp_vectors),
3271 num_online_cpus()));
3272 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3273 GFP_KERNEL);
3274 if (!target->ch)
fb49c8bb 3275 goto out;
aef9ec39 3276
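/*
 * Distribute the channels over the online NUMA nodes and give each
 * node's channels a node-local slice of the HCA completion vectors,
 * offset by the comp_vector login parameter.
 */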
d92c0da7
BVA
3277 node_idx = 0;
3278 for_each_online_node(node) {
3279 const int ch_start = (node_idx * target->ch_count /
3280 num_online_nodes());
3281 const int ch_end = ((node_idx + 1) * target->ch_count /
3282 num_online_nodes());
3283 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3284 num_online_nodes() + target->comp_vector)
3285 % ibdev->num_comp_vectors;
3286 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3287 num_online_nodes() + target->comp_vector)
3288 % ibdev->num_comp_vectors;
3289 int cpu_idx = 0;
3290
3291 for_each_online_cpu(cpu) {
3292 if (cpu_to_node(cpu) != node)
3293 continue;
3294 if (ch_start + cpu_idx >= ch_end)
3295 continue;
3296 ch = &target->ch[ch_start + cpu_idx];
3297 ch->target = target;
3298 ch->comp_vector = cv_start == cv_end ? cv_start :
3299 cv_start + cpu_idx % (cv_end - cv_start);
3300 spin_lock_init(&ch->lock);
3301 INIT_LIST_HEAD(&ch->free_tx);
3302 ret = srp_new_cm_id(ch);
3303 if (ret)
3304 goto err_disconnect;
aef9ec39 3305
d92c0da7
BVA
3306 ret = srp_create_ch_ib(ch);
3307 if (ret)
3308 goto err_disconnect;
3309
3310 ret = srp_alloc_req_data(ch);
3311 if (ret)
3312 goto err_disconnect;
3313
3314 ret = srp_connect_ch(ch, multich);
3315 if (ret) {
3316 shost_printk(KERN_ERR, target->scsi_host,
3317 PFX "Connection %d/%d failed\n",
3318 ch_start + cpu_idx,
3319 target->ch_count);
3320 if (node_idx == 0 && cpu_idx == 0) {
3321 goto err_disconnect;
3322 } else {
3323 srp_free_ch_ib(target, ch);
3324 srp_free_req_data(target, ch);
3325 target->ch_count = ch - target->ch;
c257ea6f 3326 goto connected;
d92c0da7
BVA
3327 }
3328 }
3329
3330 multich = true;
3331 cpu_idx++;
3332 }
3333 node_idx++;
aef9ec39
RD
3334 }
3335
c257ea6f 3336connected:
d92c0da7
BVA
3337 target->scsi_host->nr_hw_queues = target->ch_count;
3338
aef9ec39
RD
3339 ret = srp_add_target(host, target);
3340 if (ret)
3341 goto err_disconnect;
3342
34aa654e
BVA
3343 if (target->state != SRP_TARGET_REMOVED) {
3344 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3345 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3346 be64_to_cpu(target->id_ext),
3347 be64_to_cpu(target->ioc_guid),
747fe000 3348 be16_to_cpu(target->pkey),
34aa654e 3349 be64_to_cpu(target->service_id),
747fe000 3350 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3351 }
e7ffde01 3352
2d7091bc
BVA
3353 ret = count;
3354
3355out:
3356 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3357
3358 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3359 if (ret < 0)
3360 scsi_host_put(target->scsi_host);
34aa654e 3361
2d7091bc 3362 return ret;
aef9ec39
RD
3363
3364err_disconnect:
3365 srp_disconnect_target(target);
3366
d92c0da7
BVA
3367 for (i = 0; i < target->ch_count; i++) {
3368 ch = &target->ch[i];
3369 srp_free_ch_ib(target, ch);
3370 srp_free_req_data(target, ch);
3371 }
aef9ec39 3372
d92c0da7 3373 kfree(target->ch);
2d7091bc 3374 goto out;
aef9ec39
RD
3375}
3376
ee959b00 3377static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3378
ee959b00
TJ
3379static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3380 char *buf)
aef9ec39 3381{
ee959b00 3382 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3383
05321937 3384 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3385}
3386
ee959b00 3387static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3388
ee959b00
TJ
3389static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3390 char *buf)
aef9ec39 3391{
ee959b00 3392 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3393
3394 return sprintf(buf, "%d\n", host->port);
3395}
3396
ee959b00 3397static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3398
f5358a17 3399static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3400{
3401 struct srp_host *host;
3402
3403 host = kzalloc(sizeof *host, GFP_KERNEL);
3404 if (!host)
3405 return NULL;
3406
3407 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3408 spin_lock_init(&host->target_lock);
aef9ec39 3409 init_completion(&host->released);
2d7091bc 3410 mutex_init(&host->add_target_mutex);
05321937 3411 host->srp_dev = device;
aef9ec39
RD
3412 host->port = port;
3413
ee959b00
TJ
3414 host->dev.class = &srp_class;
3415 host->dev.parent = device->dev->dma_device;
d927e38c 3416 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3417
ee959b00 3418 if (device_register(&host->dev))
f5358a17 3419 goto free_host;
ee959b00 3420 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3421 goto err_class;
ee959b00 3422 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3423 goto err_class;
ee959b00 3424 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3425 goto err_class;
3426
3427 return host;
3428
3429err_class:
ee959b00 3430 device_unregister(&host->dev);
aef9ec39 3431
f5358a17 3432free_host:
aef9ec39
RD
3433 kfree(host);
3434
3435 return NULL;
3436}
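Each port registered by srp_add_port() appears as a device of the infiniband_srp class named srp-<ibdev>-<port> (for example srp-mlx4_0-1 on a hypothetical mlx4 HCA), carrying the write-only add_target attribute plus the read-only ibdev and port attributes defined above. Writing an option string such as id_ext=...,ioc_guid=...,dgid=...,pkey=...,service_id=... to add_target is what invokes srp_create_target(); the exact tokens are parsed by srp_parse_options() earlier in this file.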
3437
3438static void srp_add_one(struct ib_device *device)
3439{
f5358a17
RD
3440 struct srp_device *srp_dev;
3441 struct ib_device_attr *dev_attr;
aef9ec39 3442 struct srp_host *host;
4139032b 3443 int mr_page_shift, p;
52ede08f 3444 u64 max_pages_per_mr;
aef9ec39 3445
f5358a17
RD
3446 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3447 if (!dev_attr)
cf311cd4 3448 return;
aef9ec39 3449
f5358a17 3450 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3451 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3452 goto free_attr;
3453 }
3454
3455 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3456 if (!srp_dev)
3457 goto free_attr;
3458
d1b4289e
BVA
3459 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3460 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3461 srp_dev->has_fr = (dev_attr->device_cap_flags &
3462 IB_DEVICE_MEM_MGT_EXTENSIONS);
3463 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3464 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3465
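	/*
	 * Pick the memory registration strategy: fast registration (FR) is
	 * used when supported, unless FMR is also supported and the prefer_fr
	 * module parameter was cleared; FMR is the fallback.
	 */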
3466 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3467 (!srp_dev->has_fmr || prefer_fr));
002f1567 3468 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
d1b4289e 3469
f5358a17
RD
3470 /*
3471 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3472 * minimum of 4096 bytes. We're unlikely to build large sglists
3473 * out of smaller entries.
f5358a17 3474 */
52ede08f
BVA
3475 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3476 srp_dev->mr_page_size = 1 << mr_page_shift;
3477 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3478 max_pages_per_mr = dev_attr->max_mr_size;
3479 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3480 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3481 max_pages_per_mr);
5cfb1782
BVA
3482 if (srp_dev->use_fast_reg) {
3483 srp_dev->max_pages_per_mr =
3484 min_t(u32, srp_dev->max_pages_per_mr,
3485 dev_attr->max_fast_reg_page_list_len);
3486 }
52ede08f
BVA
3487 srp_dev->mr_max_size = srp_dev->mr_page_size *
3488 srp_dev->max_pages_per_mr;
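	/*
	 * Worked example for a hypothetical HCA: if the smallest page size in
	 * page_size_cap is 4 KiB, mr_page_shift is 12 and mr_page_size 4096;
	 * with a max_mr_size of 2 MiB, max_pages_per_mr starts out as 512 and
	 * is then clamped by the min_t() calls above (SRP_MAX_PAGES_PER_MR
	 * and, for fast registration, max_fast_reg_page_list_len), so
	 * mr_max_size ends up as mr_page_size * max_pages_per_mr.
	 */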
5cfb1782 3489 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3490 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3491 dev_attr->max_fast_reg_page_list_len,
52ede08f 3492 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3493
3494 INIT_LIST_HEAD(&srp_dev->dev_list);
3495
3496 srp_dev->dev = device;
3497 srp_dev->pd = ib_alloc_pd(device);
3498 if (IS_ERR(srp_dev->pd))
3499 goto free_dev;
3500
03f6fb93
BVA
3501 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3502 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3503 IB_ACCESS_LOCAL_WRITE |
3504 IB_ACCESS_REMOTE_READ |
3505 IB_ACCESS_REMOTE_WRITE);
3506 if (IS_ERR(srp_dev->global_mr))
3507 goto err_pd;
3508 } else {
3509 srp_dev->global_mr = NULL;
3510 }
f5358a17 3511
4139032b 3512 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3513 host = srp_add_port(srp_dev, p);
aef9ec39 3514 if (host)
f5358a17 3515 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3516 }
3517
f5358a17
RD
3518 ib_set_client_data(device, &srp_client, srp_dev);
3519
3520 goto free_attr;
3521
3522err_pd:
3523 ib_dealloc_pd(srp_dev->pd);
3524
3525free_dev:
3526 kfree(srp_dev);
3527
3528free_attr:
3529 kfree(dev_attr);
aef9ec39
RD
3530}
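srp_add_one() and srp_remove_one() are the per-device callbacks of this module's IB client; the registration glue sits near the top of the file and is activated by the ib_register_client() call in srp_init_module() below. A sketch of that glue, with the field values assumed from the surrounding code rather than quoted:

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};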
3531
7c1eb45a 3532static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3533{
f5358a17 3534 struct srp_device *srp_dev;
aef9ec39 3535 struct srp_host *host, *tmp_host;
ef6c49d8 3536 struct srp_target_port *target;
aef9ec39 3537
7c1eb45a 3538 srp_dev = client_data;
1fe0cb84
DB
3539 if (!srp_dev)
3540 return;
aef9ec39 3541
f5358a17 3542 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3543 device_unregister(&host->dev);
aef9ec39
RD
3544 /*
3545 * Wait for the sysfs entry to go away, so that no new
3546 * target ports can be created.
3547 */
3548 wait_for_completion(&host->released);
3549
3550 /*
ef6c49d8 3551 * Remove all target ports.
aef9ec39 3552 */
b3589fd4 3553 spin_lock(&host->target_lock);
ef6c49d8
BVA
3554 list_for_each_entry(target, &host->target_list, list)
3555 srp_queue_remove_work(target);
b3589fd4 3556 spin_unlock(&host->target_lock);
aef9ec39
RD
3557
3558 /*
bcc05910 3559 * Wait for tl_err and target port removal tasks.
aef9ec39 3560 */
ef6c49d8 3561 flush_workqueue(system_long_wq);
bcc05910 3562 flush_workqueue(srp_remove_wq);
aef9ec39 3563
aef9ec39
RD
3564 kfree(host);
3565 }
3566
03f6fb93
BVA
3567 if (srp_dev->global_mr)
3568 ib_dereg_mr(srp_dev->global_mr);
f5358a17
RD
3569 ib_dealloc_pd(srp_dev->pd);
3570
3571 kfree(srp_dev);
aef9ec39
RD
3572}
3573
3236822b 3574static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3575 .has_rport_state = true,
3576 .reset_timer_if_blocked = true,
a95cadb9 3577 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3578 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3579 .dev_loss_tmo = &srp_dev_loss_tmo,
3580 .reconnect = srp_rport_reconnect,
dc1bdbd9 3581 .rport_delete = srp_rport_delete,
ed9b2264 3582 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3583};
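These callbacks tie the driver into the SRP transport class: scsi_transport_srp uses reconnect_delay, fast_io_fail_tmo and dev_loss_tmo to sequence rport recovery (block I/O, fast-fail it, finally delete the rport) and calls back into srp_rport_reconnect(), srp_terminate_io() and srp_rport_delete() at the corresponding steps.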
3584
aef9ec39
RD
3585static int __init srp_init_module(void)
3586{
3587 int ret;
3588
dcb4cb85 3589 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3590
49248644 3591 if (srp_sg_tablesize) {
e0bda7d8 3592 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3593 if (!cmd_sg_entries)
3594 cmd_sg_entries = srp_sg_tablesize;
3595 }
3596
3597 if (!cmd_sg_entries)
3598 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3599
3600 if (cmd_sg_entries > 255) {
e0bda7d8 3601 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3602 cmd_sg_entries = 255;
1e89a194
DD
3603 }
3604
c07d424d
DD
3605 if (!indirect_sg_entries)
3606 indirect_sg_entries = cmd_sg_entries;
3607 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3608 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3609 cmd_sg_entries);
c07d424d
DD
3610 indirect_sg_entries = cmd_sg_entries;
3611 }
3612
bcc05910 3613 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3614 if (!srp_remove_wq) {
3615 ret = -ENOMEM;
bcc05910
BVA
3616 goto out;
3617 }
3618
3619 ret = -ENOMEM;
3236822b
FT
3620 ib_srp_transport_template =
3621 srp_attach_transport(&ib_srp_transport_functions);
3622 if (!ib_srp_transport_template)
bcc05910 3623 goto destroy_wq;
3236822b 3624
aef9ec39
RD
3625 ret = class_register(&srp_class);
3626 if (ret) {
e0bda7d8 3627 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3628 goto release_tr;
aef9ec39
RD
3629 }
3630
c1a0b23b
MT
3631 ib_sa_register_client(&srp_sa_client);
3632
aef9ec39
RD
3633 ret = ib_register_client(&srp_client);
3634 if (ret) {
e0bda7d8 3635 pr_err("couldn't register IB client\n");
bcc05910 3636 goto unreg_sa;
aef9ec39
RD
3637 }
3638
bcc05910
BVA
3639out:
3640 return ret;
3641
3642unreg_sa:
3643 ib_sa_unregister_client(&srp_sa_client);
3644 class_unregister(&srp_class);
3645
3646release_tr:
3647 srp_release_transport(ib_srp_transport_template);
3648
3649destroy_wq:
3650 destroy_workqueue(srp_remove_wq);
3651 goto out;
aef9ec39
RD
3652}
3653
3654static void __exit srp_cleanup_module(void)
3655{
3656 ib_unregister_client(&srp_client);
c1a0b23b 3657 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3658 class_unregister(&srp_class);
3236822b 3659 srp_release_transport(ib_srp_transport_template);
bcc05910 3660 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3661}
3662
3663module_init(srp_init_module);
3664module_exit(srp_cleanup_module);