]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - drivers/infiniband/ulp/srp/ib_srp.c
Merge branches 'for-5.1/upstream-fixes', 'for-5.2/core', 'for-5.2/ish', 'for-5.2...
[mirror_ubuntu-kernels.git] / drivers / infiniband / ulp / srp / ib_srp.c
CommitLineData
aef9ec39
RD
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
aef9ec39
RD
31 */
32
d236cd0e 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
e0bda7d8 34
aef9ec39
RD
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
de25968c 42#include <linux/jiffies.h>
93c76dbb 43#include <linux/lockdep.h>
19f31343 44#include <linux/inet.h>
56b5390c 45#include <rdma/ib_cache.h>
aef9ec39 46
60063497 47#include <linux/atomic.h>
aef9ec39
RD
48
49#include <scsi/scsi.h>
50#include <scsi/scsi_device.h>
51#include <scsi/scsi_dbg.h>
71444b97 52#include <scsi/scsi_tcq.h>
aef9ec39 53#include <scsi/srp.h>
3236822b 54#include <scsi/scsi_transport_srp.h>
aef9ec39 55
aef9ec39
RD
56#include "ib_srp.h"
57
58#define DRV_NAME "ib_srp"
59#define PFX DRV_NAME ": "
aef9ec39
RD
60
61MODULE_AUTHOR("Roland Dreier");
33ab3e5b 62MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
aef9ec39
RD
63MODULE_LICENSE("Dual BSD/GPL");
64
1a1faf7a
BVA
65#if !defined(CONFIG_DYNAMIC_DEBUG)
66#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67#define DYNAMIC_DEBUG_BRANCH(descriptor) false
68#endif
69
49248644
DD
70static unsigned int srp_sg_tablesize;
71static unsigned int cmd_sg_entries;
c07d424d
DD
72static unsigned int indirect_sg_entries;
73static bool allow_ext_sg;
03f6fb93
BVA
74static bool prefer_fr = true;
75static bool register_always = true;
c222a39f 76static bool never_register;
49248644 77static int topspin_workarounds = 1;
74b0a15b 78
49248644
DD
79module_param(srp_sg_tablesize, uint, 0444);
80MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
74b0a15b 81
49248644
DD
82module_param(cmd_sg_entries, uint, 0444);
83MODULE_PARM_DESC(cmd_sg_entries,
84 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
aef9ec39 85
c07d424d
DD
86module_param(indirect_sg_entries, uint, 0444);
87MODULE_PARM_DESC(indirect_sg_entries,
65e8617f 88 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
c07d424d
DD
89
90module_param(allow_ext_sg, bool, 0444);
91MODULE_PARM_DESC(allow_ext_sg,
92 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
93
aef9ec39
RD
94module_param(topspin_workarounds, int, 0444);
95MODULE_PARM_DESC(topspin_workarounds,
96 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
97
5cfb1782
BVA
98module_param(prefer_fr, bool, 0444);
99MODULE_PARM_DESC(prefer_fr,
100"Whether to use fast registration if both FMR and fast registration are supported");
101
b1b8854d
BVA
102module_param(register_always, bool, 0444);
103MODULE_PARM_DESC(register_always,
104 "Use memory registration even for contiguous memory regions");
105
c222a39f
BVA
106module_param(never_register, bool, 0444);
107MODULE_PARM_DESC(never_register, "Never register memory");
108
9c27847d 109static const struct kernel_param_ops srp_tmo_ops;
ed9b2264 110
a95cadb9
BVA
111static int srp_reconnect_delay = 10;
112module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
113 S_IRUGO | S_IWUSR);
114MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
115
ed9b2264
BVA
116static int srp_fast_io_fail_tmo = 15;
117module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
118 S_IRUGO | S_IWUSR);
119MODULE_PARM_DESC(fast_io_fail_tmo,
120 "Number of seconds between the observation of a transport"
121 " layer error and failing all I/O. \"off\" means that this"
122 " functionality is disabled.");
123
a95cadb9 124static int srp_dev_loss_tmo = 600;
ed9b2264
BVA
125module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
126 S_IRUGO | S_IWUSR);
127MODULE_PARM_DESC(dev_loss_tmo,
128 "Maximum number of seconds that the SRP transport should"
129 " insulate transport layer errors. After this time has been"
130 " exceeded the SCSI host is removed. Should be"
131 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
132 " if fast_io_fail_tmo has not been set. \"off\" means that"
133 " this functionality is disabled.");
134
882981f4
BVA
135static bool srp_use_imm_data = true;
136module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
137MODULE_PARM_DESC(use_imm_data,
138 "Whether or not to request permission to use immediate data during SRP login.");
139
140static unsigned int srp_max_imm_data = 8 * 1024;
141module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
142MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
143
d92c0da7
BVA
144static unsigned ch_count;
145module_param(ch_count, uint, 0444);
146MODULE_PARM_DESC(ch_count,
147 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
148
aef9ec39 149static void srp_add_one(struct ib_device *device);
7c1eb45a 150static void srp_remove_one(struct ib_device *device, void *client_data);
1dc7b1f1
CH
151static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
152static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
153 const char *opname);
e7ff98ae
PP
154static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
155 const struct ib_cm_event *event);
19f31343
BVA
156static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
157 struct rdma_cm_event *event);
aef9ec39 158
3236822b 159static struct scsi_transport_template *ib_srp_transport_template;
bcc05910 160static struct workqueue_struct *srp_remove_wq;
3236822b 161
aef9ec39
RD
162static struct ib_client srp_client = {
163 .name = "srp",
164 .add = srp_add_one,
165 .remove = srp_remove_one
166};
167
c1a0b23b
MT
168static struct ib_sa_client srp_sa_client;
169
ed9b2264
BVA
170static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
171{
172 int tmo = *(int *)kp->arg;
173
174 if (tmo >= 0)
175 return sprintf(buffer, "%d", tmo);
176 else
177 return sprintf(buffer, "off");
178}
179
180static int srp_tmo_set(const char *val, const struct kernel_param *kp)
181{
182 int tmo, res;
183
3fdf70ac
SG
184 res = srp_parse_tmo(&tmo, val);
185 if (res)
186 goto out;
187
a95cadb9
BVA
188 if (kp->arg == &srp_reconnect_delay)
189 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
190 srp_dev_loss_tmo);
191 else if (kp->arg == &srp_fast_io_fail_tmo)
192 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
ed9b2264 193 else
a95cadb9
BVA
194 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
195 tmo);
ed9b2264
BVA
196 if (res)
197 goto out;
198 *(int *)kp->arg = tmo;
199
200out:
201 return res;
202}
203
9c27847d 204static const struct kernel_param_ops srp_tmo_ops = {
ed9b2264
BVA
205 .get = srp_tmo_get,
206 .set = srp_tmo_set,
207};
208
aef9ec39
RD
209static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
210{
211 return (struct srp_target_port *) host->hostdata;
212}
213
214static const char *srp_target_info(struct Scsi_Host *host)
215{
216 return host_to_target(host)->target_name;
217}
218
5d7cbfd6
RD
219static int srp_target_is_topspin(struct srp_target_port *target)
220{
221 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 222 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
223
224 return topspin_workarounds &&
3d1ff48d
RK
225 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
226 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
227}
228
aef9ec39
RD
229static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
230 gfp_t gfp_mask,
231 enum dma_data_direction direction)
232{
233 struct srp_iu *iu;
234
235 iu = kmalloc(sizeof *iu, gfp_mask);
236 if (!iu)
237 goto out;
238
239 iu->buf = kzalloc(size, gfp_mask);
240 if (!iu->buf)
241 goto out_free_iu;
242
05321937
GKH
243 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
244 direction);
245 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
246 goto out_free_buf;
247
248 iu->size = size;
249 iu->direction = direction;
250
251 return iu;
252
253out_free_buf:
254 kfree(iu->buf);
255out_free_iu:
256 kfree(iu);
257out:
258 return NULL;
259}
260
261static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
262{
263 if (!iu)
264 return;
265
05321937
GKH
266 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
267 iu->direction);
aef9ec39
RD
268 kfree(iu->buf);
269 kfree(iu);
270}
271
272static void srp_qp_event(struct ib_event *event, void *context)
273{
57363d98
SG
274 pr_debug("QP event %s (%d)\n",
275 ib_event_msg(event->event), event->event);
aef9ec39
RD
276}
277
19f31343
BVA
278static int srp_init_ib_qp(struct srp_target_port *target,
279 struct ib_qp *qp)
aef9ec39
RD
280{
281 struct ib_qp_attr *attr;
282 int ret;
283
284 attr = kmalloc(sizeof *attr, GFP_KERNEL);
285 if (!attr)
286 return -ENOMEM;
287
56b5390c
BVA
288 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
289 target->srp_host->port,
19f31343 290 be16_to_cpu(target->ib_cm.pkey),
56b5390c 291 &attr->pkey_index);
aef9ec39
RD
292 if (ret)
293 goto out;
294
295 attr->qp_state = IB_QPS_INIT;
296 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
297 IB_ACCESS_REMOTE_WRITE);
298 attr->port_num = target->srp_host->port;
299
300 ret = ib_modify_qp(qp, attr,
301 IB_QP_STATE |
302 IB_QP_PKEY_INDEX |
303 IB_QP_ACCESS_FLAGS |
304 IB_QP_PORT);
305
306out:
307 kfree(attr);
308 return ret;
309}
310
19f31343 311static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 312{
509c07bc 313 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
314 struct ib_cm_id *new_cm_id;
315
05321937 316 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
19f31343 317 srp_ib_cm_handler, ch);
9fe4bcf4
DD
318 if (IS_ERR(new_cm_id))
319 return PTR_ERR(new_cm_id);
320
19f31343
BVA
321 if (ch->ib_cm.cm_id)
322 ib_destroy_cm_id(ch->ib_cm.cm_id);
323 ch->ib_cm.cm_id = new_cm_id;
4c33bd19
DC
324 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
325 target->srp_host->port))
19f31343 326 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
4c33bd19 327 else
19f31343
BVA
328 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
329 ch->ib_cm.path.sgid = target->sgid;
330 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
331 ch->ib_cm.path.pkey = target->ib_cm.pkey;
332 ch->ib_cm.path.service_id = target->ib_cm.service_id;
9fe4bcf4
DD
333
334 return 0;
335}
336
19f31343
BVA
337static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
338{
339 struct srp_target_port *target = ch->target;
340 struct rdma_cm_id *new_cm_id;
19f31343
BVA
341 int ret;
342
343 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
344 RDMA_PS_TCP, IB_QPT_RC);
345 if (IS_ERR(new_cm_id)) {
346 ret = PTR_ERR(new_cm_id);
347 new_cm_id = NULL;
348 goto out;
349 }
350
351 init_completion(&ch->done);
352 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
353 (struct sockaddr *)&target->rdma_cm.src : NULL,
354 (struct sockaddr *)&target->rdma_cm.dst,
355 SRP_PATH_REC_TIMEOUT_MS);
356 if (ret) {
7da09af9
BVA
357 pr_err("No route available from %pIS to %pIS (%d)\n",
358 &target->rdma_cm.src, &target->rdma_cm.dst, ret);
19f31343
BVA
359 goto out;
360 }
361 ret = wait_for_completion_interruptible(&ch->done);
362 if (ret < 0)
363 goto out;
364
365 ret = ch->status;
366 if (ret) {
7da09af9
BVA
367 pr_err("Resolving address %pIS failed (%d)\n",
368 &target->rdma_cm.dst, ret);
19f31343
BVA
369 goto out;
370 }
371
372 swap(ch->rdma_cm.cm_id, new_cm_id);
373
374out:
375 if (new_cm_id)
376 rdma_destroy_id(new_cm_id);
377
378 return ret;
379}
380
381static int srp_new_cm_id(struct srp_rdma_ch *ch)
382{
383 struct srp_target_port *target = ch->target;
384
385 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
386 srp_new_ib_cm_id(ch);
387}
388
d1b4289e
BVA
389static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
390{
391 struct srp_device *dev = target->srp_host->srp_dev;
392 struct ib_fmr_pool_param fmr_param;
393
394 memset(&fmr_param, 0, sizeof(fmr_param));
fa9863f8 395 fmr_param.pool_size = target->mr_pool_size;
d1b4289e
BVA
396 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
397 fmr_param.cache = 1;
52ede08f
BVA
398 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
399 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
400 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
401 IB_ACCESS_REMOTE_WRITE |
402 IB_ACCESS_REMOTE_READ);
403
404 return ib_create_fmr_pool(dev->pd, &fmr_param);
405}
406
5cfb1782
BVA
407/**
408 * srp_destroy_fr_pool() - free the resources owned by a pool
409 * @pool: Fast registration pool to be destroyed.
410 */
411static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
412{
413 int i;
414 struct srp_fr_desc *d;
415
416 if (!pool)
417 return;
418
419 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
5cfb1782
BVA
420 if (d->mr)
421 ib_dereg_mr(d->mr);
422 }
423 kfree(pool);
424}
425
426/**
427 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
428 * @device: IB device to allocate fast registration descriptors for.
429 * @pd: Protection domain associated with the FR descriptors.
430 * @pool_size: Number of descriptors to allocate.
431 * @max_page_list_len: Maximum fast registration work request page list length.
432 */
433static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
434 struct ib_pd *pd, int pool_size,
435 int max_page_list_len)
436{
437 struct srp_fr_pool *pool;
438 struct srp_fr_desc *d;
439 struct ib_mr *mr;
5cfb1782 440 int i, ret = -EINVAL;
fbd36818 441 enum ib_mr_type mr_type;
5cfb1782
BVA
442
443 if (pool_size <= 0)
444 goto err;
445 ret = -ENOMEM;
7a7b0fea 446 pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
5cfb1782
BVA
447 if (!pool)
448 goto err;
449 pool->size = pool_size;
450 pool->max_page_list_len = max_page_list_len;
451 spin_lock_init(&pool->lock);
452 INIT_LIST_HEAD(&pool->free_list);
453
fbd36818
SG
454 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
455 mr_type = IB_MR_TYPE_SG_GAPS;
456 else
457 mr_type = IB_MR_TYPE_MEM_REG;
458
5cfb1782 459 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
fbd36818 460 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
5cfb1782
BVA
461 if (IS_ERR(mr)) {
462 ret = PTR_ERR(mr);
3787d990
BVA
463 if (ret == -ENOMEM)
464 pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
465 dev_name(&device->dev));
5cfb1782
BVA
466 goto destroy_pool;
467 }
468 d->mr = mr;
5cfb1782
BVA
469 list_add_tail(&d->entry, &pool->free_list);
470 }
471
472out:
473 return pool;
474
475destroy_pool:
476 srp_destroy_fr_pool(pool);
477
478err:
479 pool = ERR_PTR(ret);
480 goto out;
481}
482
483/**
484 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
485 * @pool: Pool to obtain descriptor from.
486 */
487static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
488{
489 struct srp_fr_desc *d = NULL;
490 unsigned long flags;
491
492 spin_lock_irqsave(&pool->lock, flags);
493 if (!list_empty(&pool->free_list)) {
494 d = list_first_entry(&pool->free_list, typeof(*d), entry);
495 list_del(&d->entry);
496 }
497 spin_unlock_irqrestore(&pool->lock, flags);
498
499 return d;
500}
501
502/**
503 * srp_fr_pool_put() - put an FR descriptor back in the free list
504 * @pool: Pool the descriptor was allocated from.
505 * @desc: Pointer to an array of fast registration descriptor pointers.
506 * @n: Number of descriptors to put back.
507 *
508 * Note: The caller must already have queued an invalidation request for
509 * desc->mr->rkey before calling this function.
510 */
511static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
512 int n)
513{
514 unsigned long flags;
515 int i;
516
517 spin_lock_irqsave(&pool->lock, flags);
518 for (i = 0; i < n; i++)
519 list_add(&desc[i]->entry, &pool->free_list);
520 spin_unlock_irqrestore(&pool->lock, flags);
521}
522
523static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
524{
525 struct srp_device *dev = target->srp_host->srp_dev;
526
fa9863f8 527 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
5cfb1782
BVA
528 dev->max_pages_per_mr);
529}
530
7dad6b2e
BVA
531/**
532 * srp_destroy_qp() - destroy an RDMA queue pair
9566b054 533 * @ch: SRP RDMA channel.
7dad6b2e 534 *
561392d4
SW
535 * Drain the qp before destroying it. This avoids that the receive
536 * completion handler can access the queue pair while it is
7dad6b2e
BVA
537 * being destroyed.
538 */
9566b054 539static void srp_destroy_qp(struct srp_rdma_ch *ch)
7dad6b2e 540{
9294000d
BVA
541 spin_lock_irq(&ch->lock);
542 ib_process_cq_direct(ch->send_cq, -1);
543 spin_unlock_irq(&ch->lock);
544
9566b054
BVA
545 ib_drain_qp(ch->qp);
546 ib_destroy_qp(ch->qp);
7dad6b2e
BVA
547}
548
509c07bc 549static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 550{
509c07bc 551 struct srp_target_port *target = ch->target;
62154b2e 552 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 553 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
554 struct ib_cq *recv_cq, *send_cq;
555 struct ib_qp *qp;
d1b4289e 556 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782 557 struct srp_fr_pool *fr_pool = NULL;
509c5f33 558 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
aef9ec39
RD
559 int ret;
560
561 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
562 if (!init_attr)
563 return -ENOMEM;
564
561392d4 565 /* queue_size + 1 for ib_drain_rq() */
1dc7b1f1
CH
566 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
567 ch->comp_vector, IB_POLL_SOFTIRQ);
73aa89ed
IR
568 if (IS_ERR(recv_cq)) {
569 ret = PTR_ERR(recv_cq);
da9d2f07 570 goto err;
aef9ec39
RD
571 }
572
1dc7b1f1
CH
573 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
574 ch->comp_vector, IB_POLL_DIRECT);
73aa89ed
IR
575 if (IS_ERR(send_cq)) {
576 ret = PTR_ERR(send_cq);
da9d2f07 577 goto err_recv_cq;
9c03dc9f
BVA
578 }
579
aef9ec39 580 init_attr->event_handler = srp_qp_event;
5cfb1782 581 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 582 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39 583 init_attr->cap.max_recv_sge = 1;
882981f4 584 init_attr->cap.max_send_sge = SRP_MAX_SGE;
5cfb1782 585 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 586 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
587 init_attr->send_cq = send_cq;
588 init_attr->recv_cq = recv_cq;
aef9ec39 589
19f31343
BVA
590 if (target->using_rdma_cm) {
591 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
592 qp = ch->rdma_cm.cm_id->qp;
593 } else {
594 qp = ib_create_qp(dev->pd, init_attr);
595 if (!IS_ERR(qp)) {
596 ret = srp_init_ib_qp(target, qp);
597 if (ret)
598 ib_destroy_qp(qp);
599 } else {
600 ret = PTR_ERR(qp);
601 }
602 }
603 if (ret) {
604 pr_err("QP creation failed for dev %s: %d\n",
605 dev_name(&dev->dev->dev), ret);
da9d2f07 606 goto err_send_cq;
aef9ec39
RD
607 }
608
002f1567 609 if (dev->use_fast_reg) {
5cfb1782
BVA
610 fr_pool = srp_alloc_fr_pool(target);
611 if (IS_ERR(fr_pool)) {
612 ret = PTR_ERR(fr_pool);
613 shost_printk(KERN_WARNING, target->scsi_host, PFX
614 "FR pool allocation failed (%d)\n", ret);
615 goto err_qp;
616 }
002f1567 617 } else if (dev->use_fmr) {
d1b4289e
BVA
618 fmr_pool = srp_alloc_fmr_pool(target);
619 if (IS_ERR(fmr_pool)) {
620 ret = PTR_ERR(fmr_pool);
621 shost_printk(KERN_WARNING, target->scsi_host, PFX
622 "FMR pool allocation failed (%d)\n", ret);
623 goto err_qp;
624 }
d1b4289e
BVA
625 }
626
509c07bc 627 if (ch->qp)
9566b054 628 srp_destroy_qp(ch);
509c07bc 629 if (ch->recv_cq)
1dc7b1f1 630 ib_free_cq(ch->recv_cq);
509c07bc 631 if (ch->send_cq)
1dc7b1f1 632 ib_free_cq(ch->send_cq);
73aa89ed 633
509c07bc
BVA
634 ch->qp = qp;
635 ch->recv_cq = recv_cq;
636 ch->send_cq = send_cq;
73aa89ed 637
7fbc67df
SG
638 if (dev->use_fast_reg) {
639 if (ch->fr_pool)
640 srp_destroy_fr_pool(ch->fr_pool);
641 ch->fr_pool = fr_pool;
642 } else if (dev->use_fmr) {
643 if (ch->fmr_pool)
644 ib_destroy_fmr_pool(ch->fmr_pool);
645 ch->fmr_pool = fmr_pool;
646 }
647
da9d2f07
RD
648 kfree(init_attr);
649 return 0;
650
651err_qp:
19f31343
BVA
652 if (target->using_rdma_cm)
653 rdma_destroy_qp(ch->rdma_cm.cm_id);
654 else
655 ib_destroy_qp(qp);
da9d2f07
RD
656
657err_send_cq:
1dc7b1f1 658 ib_free_cq(send_cq);
da9d2f07
RD
659
660err_recv_cq:
1dc7b1f1 661 ib_free_cq(recv_cq);
da9d2f07
RD
662
663err:
aef9ec39
RD
664 kfree(init_attr);
665 return ret;
666}
667
4d73f95f
BVA
668/*
669 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 670 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 671 */
509c07bc
BVA
672static void srp_free_ch_ib(struct srp_target_port *target,
673 struct srp_rdma_ch *ch)
aef9ec39 674{
5cfb1782 675 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
676 int i;
677
d92c0da7
BVA
678 if (!ch->target)
679 return;
680
19f31343
BVA
681 if (target->using_rdma_cm) {
682 if (ch->rdma_cm.cm_id) {
683 rdma_destroy_id(ch->rdma_cm.cm_id);
684 ch->rdma_cm.cm_id = NULL;
685 }
686 } else {
687 if (ch->ib_cm.cm_id) {
688 ib_destroy_cm_id(ch->ib_cm.cm_id);
689 ch->ib_cm.cm_id = NULL;
690 }
394c595e
BVA
691 }
692
d92c0da7
BVA
693 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
694 if (!ch->qp)
695 return;
696
5cfb1782 697 if (dev->use_fast_reg) {
509c07bc
BVA
698 if (ch->fr_pool)
699 srp_destroy_fr_pool(ch->fr_pool);
002f1567 700 } else if (dev->use_fmr) {
509c07bc
BVA
701 if (ch->fmr_pool)
702 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 703 }
1dc7b1f1 704
9566b054 705 srp_destroy_qp(ch);
1dc7b1f1
CH
706 ib_free_cq(ch->send_cq);
707 ib_free_cq(ch->recv_cq);
aef9ec39 708
d92c0da7
BVA
709 /*
710 * Avoid that the SCSI error handler tries to use this channel after
711 * it has been freed. The SCSI error handler can namely continue
712 * trying to perform recovery actions after scsi_remove_host()
713 * returned.
714 */
715 ch->target = NULL;
716
509c07bc
BVA
717 ch->qp = NULL;
718 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 719
509c07bc 720 if (ch->rx_ring) {
4d73f95f 721 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
722 srp_free_iu(target->srp_host, ch->rx_ring[i]);
723 kfree(ch->rx_ring);
724 ch->rx_ring = NULL;
4d73f95f 725 }
509c07bc 726 if (ch->tx_ring) {
4d73f95f 727 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
728 srp_free_iu(target->srp_host, ch->tx_ring[i]);
729 kfree(ch->tx_ring);
730 ch->tx_ring = NULL;
4d73f95f 731 }
aef9ec39
RD
732}
733
734static void srp_path_rec_completion(int status,
c2f8fc4e 735 struct sa_path_rec *pathrec,
509c07bc 736 void *ch_ptr)
aef9ec39 737{
509c07bc
BVA
738 struct srp_rdma_ch *ch = ch_ptr;
739 struct srp_target_port *target = ch->target;
aef9ec39 740
509c07bc 741 ch->status = status;
aef9ec39 742 if (status)
7aa54bd7
DD
743 shost_printk(KERN_ERR, target->scsi_host,
744 PFX "Got failed path rec status %d\n", status);
aef9ec39 745 else
19f31343 746 ch->ib_cm.path = *pathrec;
509c07bc 747 complete(&ch->done);
aef9ec39
RD
748}
749
19f31343 750static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 751{
509c07bc 752 struct srp_target_port *target = ch->target;
c74ff750 753 int ret;
a702adce 754
19f31343 755 ch->ib_cm.path.numb_path = 1;
509c07bc
BVA
756
757 init_completion(&ch->done);
758
19f31343 759 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
509c07bc
BVA
760 target->srp_host->srp_dev->dev,
761 target->srp_host->port,
19f31343 762 &ch->ib_cm.path,
509c07bc
BVA
763 IB_SA_PATH_REC_SERVICE_ID |
764 IB_SA_PATH_REC_DGID |
765 IB_SA_PATH_REC_SGID |
766 IB_SA_PATH_REC_NUMB_PATH |
767 IB_SA_PATH_REC_PKEY,
768 SRP_PATH_REC_TIMEOUT_MS,
769 GFP_KERNEL,
770 srp_path_rec_completion,
19f31343 771 ch, &ch->ib_cm.path_query);
c74ff750
BVA
772 if (ch->ib_cm.path_query_id < 0)
773 return ch->ib_cm.path_query_id;
509c07bc
BVA
774
775 ret = wait_for_completion_interruptible(&ch->done);
a702adce 776 if (ret < 0)
c74ff750 777 return ret;
aef9ec39 778
c74ff750 779 if (ch->status < 0)
7aa54bd7 780 shost_printk(KERN_WARNING, target->scsi_host,
85769c6f 781 PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
19f31343
BVA
782 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
783 be16_to_cpu(target->ib_cm.pkey),
784 be64_to_cpu(target->ib_cm.service_id));
aef9ec39 785
c74ff750 786 return ch->status;
aef9ec39
RD
787}
788
19f31343
BVA
789static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
790{
791 struct srp_target_port *target = ch->target;
792 int ret;
793
794 init_completion(&ch->done);
795
796 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
797 if (ret)
798 return ret;
799
800 wait_for_completion_interruptible(&ch->done);
801
802 if (ch->status != 0)
803 shost_printk(KERN_WARNING, target->scsi_host,
804 PFX "Path resolution failed\n");
805
806 return ch->status;
807}
808
809static int srp_lookup_path(struct srp_rdma_ch *ch)
810{
811 struct srp_target_port *target = ch->target;
812
813 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
814 srp_ib_lookup_path(ch);
815}
816
4c532d6c
BVA
817static u8 srp_get_subnet_timeout(struct srp_host *host)
818{
819 struct ib_port_attr attr;
820 int ret;
821 u8 subnet_timeout = 18;
822
823 ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
824 if (ret == 0)
825 subnet_timeout = attr.subnet_timeout;
826
827 if (unlikely(subnet_timeout < 15))
828 pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
829 dev_name(&host->srp_dev->dev->dev), subnet_timeout);
830
831 return subnet_timeout;
832}
833
513d5647
BVA
834static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
835 bool multich)
aef9ec39 836{
509c07bc 837 struct srp_target_port *target = ch->target;
aef9ec39 838 struct {
19f31343
BVA
839 struct rdma_conn_param rdma_param;
840 struct srp_login_req_rdma rdma_req;
841 struct ib_cm_req_param ib_param;
842 struct srp_login_req ib_req;
aef9ec39 843 } *req = NULL;
48900a28 844 char *ipi, *tpi;
aef9ec39
RD
845 int status;
846
847 req = kzalloc(sizeof *req, GFP_KERNEL);
848 if (!req)
849 return -ENOMEM;
850
19f31343
BVA
851 req->ib_param.flow_control = 1;
852 req->ib_param.retry_count = target->tl_retry_count;
aef9ec39
RD
853
854 /*
855 * Pick some arbitrary defaults here; we could make these
856 * module parameters if anyone cared about setting them.
857 */
19f31343
BVA
858 req->ib_param.responder_resources = 4;
859 req->ib_param.rnr_retry_count = 7;
860 req->ib_param.max_cm_retries = 15;
861
862 req->ib_req.opcode = SRP_LOGIN_REQ;
863 req->ib_req.tag = 0;
513d5647 864 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
19f31343 865 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
aef9ec39 866 SRP_BUF_FORMAT_INDIRECT);
19f31343
BVA
867 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
868 SRP_MULTICHAN_SINGLE);
882981f4
BVA
869 if (srp_use_imm_data) {
870 req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
871 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
872 }
19f31343
BVA
873
874 if (target->using_rdma_cm) {
875 req->rdma_param.flow_control = req->ib_param.flow_control;
876 req->rdma_param.responder_resources =
877 req->ib_param.responder_resources;
878 req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
879 req->rdma_param.retry_count = req->ib_param.retry_count;
880 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
881 req->rdma_param.private_data = &req->rdma_req;
882 req->rdma_param.private_data_len = sizeof(req->rdma_req);
883
884 req->rdma_req.opcode = req->ib_req.opcode;
885 req->rdma_req.tag = req->ib_req.tag;
886 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
887 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
888 req->rdma_req.req_flags = req->ib_req.req_flags;
882981f4 889 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
19f31343
BVA
890
891 ipi = req->rdma_req.initiator_port_id;
892 tpi = req->rdma_req.target_port_id;
893 } else {
48900a28
BVA
894 u8 subnet_timeout;
895
896 subnet_timeout = srp_get_subnet_timeout(target->srp_host);
897
19f31343
BVA
898 req->ib_param.primary_path = &ch->ib_cm.path;
899 req->ib_param.alternate_path = NULL;
900 req->ib_param.service_id = target->ib_cm.service_id;
901 get_random_bytes(&req->ib_param.starting_psn, 4);
902 req->ib_param.starting_psn &= 0xffffff;
903 req->ib_param.qp_num = ch->qp->qp_num;
904 req->ib_param.qp_type = ch->qp->qp_type;
905 req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
906 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
907 req->ib_param.private_data = &req->ib_req;
908 req->ib_param.private_data_len = sizeof(req->ib_req);
48900a28 909
19f31343
BVA
910 ipi = req->ib_req.initiator_port_id;
911 tpi = req->ib_req.target_port_id;
48900a28
BVA
912 }
913
0c0450db 914 /*
3cd96564 915 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
916 * port identifier format is 8 bytes of ID extension followed
917 * by 8 bytes of GUID. Older drafts put the two halves in the
918 * opposite order, so that the GUID comes first.
919 *
920 * Targets conforming to these obsolete drafts can be
921 * recognized by the I/O Class they report.
922 */
923 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
48900a28
BVA
924 memcpy(ipi, &target->sgid.global.interface_id, 8);
925 memcpy(ipi + 8, &target->initiator_ext, 8);
926 memcpy(tpi, &target->ioc_guid, 8);
927 memcpy(tpi + 8, &target->id_ext, 8);
0c0450db 928 } else {
48900a28
BVA
929 memcpy(ipi, &target->initiator_ext, 8);
930 memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
931 memcpy(tpi, &target->id_ext, 8);
932 memcpy(tpi + 8, &target->ioc_guid, 8);
0c0450db
R
933 }
934
aef9ec39
RD
935 /*
936 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
937 * zero out the first 8 bytes of our initiator port ID and set
938 * the second 8 bytes to the local node GUID.
aef9ec39 939 */
5d7cbfd6 940 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
941 shost_printk(KERN_DEBUG, target->scsi_host,
942 PFX "Topspin/Cisco initiator port ID workaround "
943 "activated for target GUID %016llx\n",
45c37cad 944 be64_to_cpu(target->ioc_guid));
48900a28
BVA
945 memset(ipi, 0, 8);
946 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 947 }
aef9ec39 948
19f31343
BVA
949 if (target->using_rdma_cm)
950 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
951 else
952 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
aef9ec39
RD
953
954 kfree(req);
955
956 return status;
957}
958
ef6c49d8
BVA
959static bool srp_queue_remove_work(struct srp_target_port *target)
960{
961 bool changed = false;
962
963 spin_lock_irq(&target->lock);
964 if (target->state != SRP_TARGET_REMOVED) {
965 target->state = SRP_TARGET_REMOVED;
966 changed = true;
967 }
968 spin_unlock_irq(&target->lock);
969
970 if (changed)
bcc05910 971 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
972
973 return changed;
974}
975
aef9ec39
RD
976static void srp_disconnect_target(struct srp_target_port *target)
977{
d92c0da7 978 struct srp_rdma_ch *ch;
19f31343 979 int i, ret;
509c07bc 980
c014c8cd 981 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 982
c014c8cd
BVA
983 for (i = 0; i < target->ch_count; i++) {
984 ch = &target->ch[i];
985 ch->connected = false;
19f31343
BVA
986 ret = 0;
987 if (target->using_rdma_cm) {
988 if (ch->rdma_cm.cm_id)
989 rdma_disconnect(ch->rdma_cm.cm_id);
990 } else {
991 if (ch->ib_cm.cm_id)
992 ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
993 NULL, 0);
994 }
995 if (ret < 0) {
c014c8cd
BVA
996 shost_printk(KERN_DEBUG, target->scsi_host,
997 PFX "Sending CM DREQ failed\n");
294c875a 998 }
e6581056 999 }
aef9ec39
RD
1000}
1001
509c07bc
BVA
1002static void srp_free_req_data(struct srp_target_port *target,
1003 struct srp_rdma_ch *ch)
8f26c9ff 1004{
5cfb1782
BVA
1005 struct srp_device *dev = target->srp_host->srp_dev;
1006 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
1007 struct srp_request *req;
1008 int i;
1009
47513cf4 1010 if (!ch->req_ring)
4d73f95f
BVA
1011 return;
1012
1013 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 1014 req = &ch->req_ring[i];
9a21be53 1015 if (dev->use_fast_reg) {
5cfb1782 1016 kfree(req->fr_list);
9a21be53 1017 } else {
5cfb1782 1018 kfree(req->fmr_list);
9a21be53
SG
1019 kfree(req->map_page);
1020 }
c07d424d
DD
1021 if (req->indirect_dma_addr) {
1022 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
1023 target->indirect_size,
1024 DMA_TO_DEVICE);
1025 }
1026 kfree(req->indirect_desc);
8f26c9ff 1027 }
4d73f95f 1028
509c07bc
BVA
1029 kfree(ch->req_ring);
1030 ch->req_ring = NULL;
8f26c9ff
DD
1031}
1032
509c07bc 1033static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 1034{
509c07bc 1035 struct srp_target_port *target = ch->target;
b81d00bd
BVA
1036 struct srp_device *srp_dev = target->srp_host->srp_dev;
1037 struct ib_device *ibdev = srp_dev->dev;
1038 struct srp_request *req;
5cfb1782 1039 void *mr_list;
b81d00bd
BVA
1040 dma_addr_t dma_addr;
1041 int i, ret = -ENOMEM;
1042
509c07bc
BVA
1043 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
1044 GFP_KERNEL);
1045 if (!ch->req_ring)
4d73f95f
BVA
1046 goto out;
1047
1048 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 1049 req = &ch->req_ring[i];
6da2ec56
KC
1050 mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
1051 GFP_KERNEL);
5cfb1782
BVA
1052 if (!mr_list)
1053 goto out;
9a21be53 1054 if (srp_dev->use_fast_reg) {
5cfb1782 1055 req->fr_list = mr_list;
9a21be53 1056 } else {
5cfb1782 1057 req->fmr_list = mr_list;
6da2ec56
KC
1058 req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
1059 sizeof(void *),
1060 GFP_KERNEL);
9a21be53
SG
1061 if (!req->map_page)
1062 goto out;
1063 }
b81d00bd 1064 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 1065 if (!req->indirect_desc)
b81d00bd
BVA
1066 goto out;
1067
1068 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1069 target->indirect_size,
1070 DMA_TO_DEVICE);
1071 if (ib_dma_mapping_error(ibdev, dma_addr))
1072 goto out;
1073
1074 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
1075 }
1076 ret = 0;
1077
1078out:
1079 return ret;
1080}
1081
683b159a
BVA
1082/**
1083 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1084 * @shost: SCSI host whose attributes to remove from sysfs.
1085 *
1086 * Note: Any attributes defined in the host template and that did not exist
1087 * before invocation of this function will be ignored.
1088 */
1089static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1090{
1091 struct device_attribute **attr;
1092
1093 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
1094 device_remove_file(&shost->shost_dev, *attr);
1095}
1096
ee12d6a8
BVA
1097static void srp_remove_target(struct srp_target_port *target)
1098{
d92c0da7
BVA
1099 struct srp_rdma_ch *ch;
1100 int i;
509c07bc 1101
ef6c49d8
BVA
1102 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1103
ee12d6a8 1104 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 1105 srp_rport_get(target->rport);
ee12d6a8
BVA
1106 srp_remove_host(target->scsi_host);
1107 scsi_remove_host(target->scsi_host);
93079162 1108 srp_stop_rport_timers(target->rport);
ef6c49d8 1109 srp_disconnect_target(target);
19f31343 1110 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
d92c0da7
BVA
1111 for (i = 0; i < target->ch_count; i++) {
1112 ch = &target->ch[i];
1113 srp_free_ch_ib(target, ch);
1114 }
c1120f89 1115 cancel_work_sync(&target->tl_err_work);
9dd69a60 1116 srp_rport_put(target->rport);
d92c0da7
BVA
1117 for (i = 0; i < target->ch_count; i++) {
1118 ch = &target->ch[i];
1119 srp_free_req_data(target, ch);
1120 }
1121 kfree(target->ch);
1122 target->ch = NULL;
65d7dd2f
VP
1123
1124 spin_lock(&target->srp_host->target_lock);
1125 list_del(&target->list);
1126 spin_unlock(&target->srp_host->target_lock);
1127
ee12d6a8
BVA
1128 scsi_host_put(target->scsi_host);
1129}
1130
c4028958 1131static void srp_remove_work(struct work_struct *work)
aef9ec39 1132{
c4028958 1133 struct srp_target_port *target =
ef6c49d8 1134 container_of(work, struct srp_target_port, remove_work);
aef9ec39 1135
ef6c49d8 1136 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 1137
96fc248a 1138 srp_remove_target(target);
aef9ec39
RD
1139}
1140
dc1bdbd9
BVA
1141static void srp_rport_delete(struct srp_rport *rport)
1142{
1143 struct srp_target_port *target = rport->lld_data;
1144
1145 srp_queue_remove_work(target);
1146}
1147
c014c8cd
BVA
1148/**
1149 * srp_connected_ch() - number of connected channels
1150 * @target: SRP target port.
1151 */
1152static int srp_connected_ch(struct srp_target_port *target)
1153{
1154 int i, c = 0;
1155
1156 for (i = 0; i < target->ch_count; i++)
1157 c += target->ch[i].connected;
1158
1159 return c;
1160}
1161
513d5647
BVA
1162static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1163 bool multich)
aef9ec39 1164{
509c07bc 1165 struct srp_target_port *target = ch->target;
aef9ec39
RD
1166 int ret;
1167
c014c8cd 1168 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 1169
509c07bc 1170 ret = srp_lookup_path(ch);
aef9ec39 1171 if (ret)
4d59ad29 1172 goto out;
aef9ec39
RD
1173
1174 while (1) {
509c07bc 1175 init_completion(&ch->done);
513d5647 1176 ret = srp_send_req(ch, max_iu_len, multich);
aef9ec39 1177 if (ret)
4d59ad29 1178 goto out;
509c07bc 1179 ret = wait_for_completion_interruptible(&ch->done);
a702adce 1180 if (ret < 0)
4d59ad29 1181 goto out;
aef9ec39
RD
1182
1183 /*
1184 * The CM event handling code will set status to
1185 * SRP_PORT_REDIRECT if we get a port redirect REJ
1186 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1187 * redirect REJ back.
1188 */
4d59ad29
BVA
1189 ret = ch->status;
1190 switch (ret) {
aef9ec39 1191 case 0:
c014c8cd 1192 ch->connected = true;
4d59ad29 1193 goto out;
aef9ec39
RD
1194
1195 case SRP_PORT_REDIRECT:
509c07bc 1196 ret = srp_lookup_path(ch);
aef9ec39 1197 if (ret)
4d59ad29 1198 goto out;
aef9ec39
RD
1199 break;
1200
1201 case SRP_DLID_REDIRECT:
1202 break;
1203
9fe4bcf4 1204 case SRP_STALE_CONN:
9fe4bcf4 1205 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1206 "giving up on stale connection\n");
4d59ad29
BVA
1207 ret = -ECONNRESET;
1208 goto out;
9fe4bcf4 1209
aef9ec39 1210 default:
4d59ad29 1211 goto out;
aef9ec39
RD
1212 }
1213 }
4d59ad29
BVA
1214
1215out:
1216 return ret <= 0 ? ret : -ENODEV;
aef9ec39
RD
1217}
1218
1dc7b1f1
CH
1219static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1220{
1221 srp_handle_qp_err(cq, wc, "INV RKEY");
1222}
1223
1224static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1225 u32 rkey)
5cfb1782 1226{
5cfb1782
BVA
1227 struct ib_send_wr wr = {
1228 .opcode = IB_WR_LOCAL_INV,
5cfb1782
BVA
1229 .next = NULL,
1230 .num_sge = 0,
1231 .send_flags = 0,
1232 .ex.invalidate_rkey = rkey,
1233 };
1234
1dc7b1f1
CH
1235 wr.wr_cqe = &req->reg_cqe;
1236 req->reg_cqe.done = srp_inv_rkey_err_done;
71347b0c 1237 return ib_post_send(ch->qp, &wr, NULL);
5cfb1782
BVA
1238}
1239
d945e1df 1240static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1241 struct srp_rdma_ch *ch,
d945e1df
RD
1242 struct srp_request *req)
1243{
509c07bc 1244 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1245 struct srp_device *dev = target->srp_host->srp_dev;
1246 struct ib_device *ibdev = dev->dev;
1247 int i, res;
8f26c9ff 1248
bb350d1d 1249 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1250 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1251 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1252 return;
1253
5cfb1782
BVA
1254 if (dev->use_fast_reg) {
1255 struct srp_fr_desc **pfr;
1256
1257 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1dc7b1f1 1258 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1259 if (res < 0) {
1260 shost_printk(KERN_ERR, target->scsi_host, PFX
1261 "Queueing INV WR for rkey %#x failed (%d)\n",
1262 (*pfr)->mr->rkey, res);
1263 queue_work(system_long_wq,
1264 &target->tl_err_work);
1265 }
1266 }
1267 if (req->nmdesc)
509c07bc 1268 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782 1269 req->nmdesc);
002f1567 1270 } else if (dev->use_fmr) {
5cfb1782
BVA
1271 struct ib_pool_fmr **pfmr;
1272
1273 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1274 ib_fmr_pool_unmap(*pfmr);
1275 }
f5358a17 1276
8f26c9ff
DD
1277 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1278 scmnd->sc_data_direction);
d945e1df
RD
1279}
1280
22032991
BVA
1281/**
1282 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1283 * @ch: SRP RDMA channel.
22032991 1284 * @req: SRP request.
b3fe628d 1285 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1286 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1287 * ownership of @req->scmnd if it equals @scmnd.
1288 *
1289 * Return value:
1290 * Either NULL or a pointer to the SCSI command the caller became owner of.
1291 */
509c07bc 1292static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1293 struct srp_request *req,
b3fe628d 1294 struct scsi_device *sdev,
22032991
BVA
1295 struct scsi_cmnd *scmnd)
1296{
1297 unsigned long flags;
1298
509c07bc 1299 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1300 if (req->scmnd &&
1301 (!sdev || req->scmnd->device == sdev) &&
1302 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1303 scmnd = req->scmnd;
1304 req->scmnd = NULL;
22032991
BVA
1305 } else {
1306 scmnd = NULL;
1307 }
509c07bc 1308 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1309
1310 return scmnd;
1311}
1312
1313/**
6ec2ba02 1314 * srp_free_req() - Unmap data and adjust ch->req_lim.
509c07bc 1315 * @ch: SRP RDMA channel.
af24663b
BVA
1316 * @req: Request to be freed.
1317 * @scmnd: SCSI command associated with @req.
1318 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1319 */
509c07bc
BVA
1320static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1321 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1322{
94a9174c
BVA
1323 unsigned long flags;
1324
509c07bc 1325 srp_unmap_data(scmnd, ch, req);
22032991 1326
509c07bc
BVA
1327 spin_lock_irqsave(&ch->lock, flags);
1328 ch->req_lim += req_lim_delta;
509c07bc 1329 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1330}
1331
509c07bc
BVA
1332static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1333 struct scsi_device *sdev, int result)
526b4caa 1334{
509c07bc 1335 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1336
1337 if (scmnd) {
509c07bc 1338 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1339 scmnd->result = result;
22032991 1340 scmnd->scsi_done(scmnd);
22032991 1341 }
526b4caa
IR
1342}
1343
ed9b2264 1344static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1345{
ed9b2264 1346 struct srp_target_port *target = rport->lld_data;
d92c0da7 1347 struct srp_rdma_ch *ch;
d92c0da7 1348 int i, j;
ed9b2264 1349
d92c0da7
BVA
1350 for (i = 0; i < target->ch_count; i++) {
1351 ch = &target->ch[i];
509c07bc 1352
d92c0da7
BVA
1353 for (j = 0; j < target->req_ring_size; ++j) {
1354 struct srp_request *req = &ch->req_ring[j];
1355
1356 srp_finish_req(ch, req, NULL,
1357 DID_TRANSPORT_FAILFAST << 16);
1358 }
ed9b2264
BVA
1359 }
1360}
aef9ec39 1361
513d5647 1362/* Calculate maximum initiator to target information unit length. */
882981f4 1363static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
513d5647
BVA
1364{
1365 uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1366 sizeof(struct srp_indirect_buf) +
1367 cmd_sg_cnt * sizeof(struct srp_direct_buf);
1368
882981f4
BVA
1369 if (use_imm_data)
1370 max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1371 srp_max_imm_data);
1372
513d5647
BVA
1373 return max_iu_len;
1374}
1375
ed9b2264
BVA
1376/*
1377 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1378 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1379 * srp_reset_device() or srp_reset_host() calls will occur while this function
1380 * is in progress. One way to realize that is not to call this function
1381 * directly but to call srp_reconnect_rport() instead since that last function
1382 * serializes calls of this function via rport->mutex and also blocks
1383 * srp_queuecommand() calls before invoking this function.
1384 */
1385static int srp_rport_reconnect(struct srp_rport *rport)
1386{
1387 struct srp_target_port *target = rport->lld_data;
d92c0da7 1388 struct srp_rdma_ch *ch;
882981f4
BVA
1389 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1390 srp_use_imm_data);
d92c0da7
BVA
1391 int i, j, ret = 0;
1392 bool multich = false;
09be70a2 1393
aef9ec39 1394 srp_disconnect_target(target);
34aa654e
BVA
1395
1396 if (target->state == SRP_TARGET_SCANNING)
1397 return -ENODEV;
1398
aef9ec39 1399 /*
c7c4e7ff
BVA
1400 * Now get a new local CM ID so that we avoid confusing the target in
1401 * case things are really fouled up. Doing so also ensures that all CM
1402 * callbacks will have finished before a new QP is allocated.
aef9ec39 1403 */
d92c0da7
BVA
1404 for (i = 0; i < target->ch_count; i++) {
1405 ch = &target->ch[i];
d92c0da7 1406 ret += srp_new_cm_id(ch);
536ae14e 1407 }
d92c0da7
BVA
1408 for (i = 0; i < target->ch_count; i++) {
1409 ch = &target->ch[i];
d92c0da7
BVA
1410 for (j = 0; j < target->req_ring_size; ++j) {
1411 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1412
d92c0da7
BVA
1413 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1414 }
1415 }
1416 for (i = 0; i < target->ch_count; i++) {
1417 ch = &target->ch[i];
d92c0da7
BVA
1418 /*
1419 * Whether or not creating a new CM ID succeeded, create a new
1420 * QP. This guarantees that all completion callback function
1421 * invocations have finished before request resetting starts.
1422 */
1423 ret += srp_create_ch_ib(ch);
aef9ec39 1424
d92c0da7
BVA
1425 INIT_LIST_HEAD(&ch->free_tx);
1426 for (j = 0; j < target->queue_size; ++j)
1427 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1428 }
8de9fe3a
BVA
1429
1430 target->qp_in_error = false;
1431
d92c0da7
BVA
1432 for (i = 0; i < target->ch_count; i++) {
1433 ch = &target->ch[i];
bbac5ccf 1434 if (ret)
d92c0da7 1435 break;
513d5647 1436 ret = srp_connect_ch(ch, max_iu_len, multich);
d92c0da7
BVA
1437 multich = true;
1438 }
09be70a2 1439
ed9b2264
BVA
1440 if (ret == 0)
1441 shost_printk(KERN_INFO, target->scsi_host,
1442 PFX "reconnect succeeded\n");
aef9ec39
RD
1443
1444 return ret;
1445}
1446
8f26c9ff
DD
1447static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1448 unsigned int dma_len, u32 rkey)
f5358a17 1449{
8f26c9ff 1450 struct srp_direct_buf *desc = state->desc;
f5358a17 1451
3ae95da8
BVA
1452 WARN_ON_ONCE(!dma_len);
1453
8f26c9ff
DD
1454 desc->va = cpu_to_be64(dma_addr);
1455 desc->key = cpu_to_be32(rkey);
1456 desc->len = cpu_to_be32(dma_len);
f5358a17 1457
8f26c9ff
DD
1458 state->total_len += dma_len;
1459 state->desc++;
1460 state->ndesc++;
1461}
559ce8f1 1462
8f26c9ff 1463static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1464 struct srp_rdma_ch *ch)
8f26c9ff 1465{
186fbc66
BVA
1466 struct srp_target_port *target = ch->target;
1467 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1468 struct ib_pool_fmr *fmr;
1469 u64 io_addr = 0;
85507bcc 1470
290081b4
BVA
1471 if (state->fmr.next >= state->fmr.end) {
1472 shost_printk(KERN_ERR, ch->target->scsi_host,
1473 PFX "Out of MRs (mr_per_cmd = %d)\n",
1474 ch->target->mr_per_cmd);
f731ed62 1475 return -ENOMEM;
290081b4 1476 }
f731ed62 1477
26630e8a
SG
1478 WARN_ON_ONCE(!dev->use_fmr);
1479
1480 if (state->npages == 0)
1481 return 0;
1482
cee687b6 1483 if (state->npages == 1 && target->global_rkey) {
26630e8a 1484 srp_map_desc(state, state->base_dma_addr, state->dma_len,
cee687b6 1485 target->global_rkey);
26630e8a
SG
1486 goto reset_state;
1487 }
1488
509c07bc 1489 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1490 state->npages, io_addr);
1491 if (IS_ERR(fmr))
1492 return PTR_ERR(fmr);
f5358a17 1493
f731ed62 1494 *state->fmr.next++ = fmr;
52ede08f 1495 state->nmdesc++;
f5358a17 1496
186fbc66
BVA
1497 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1498 state->dma_len, fmr->fmr->rkey);
539dde6f 1499
26630e8a
SG
1500reset_state:
1501 state->npages = 0;
1502 state->dma_len = 0;
1503
8f26c9ff
DD
1504 return 0;
1505}
1506
1dc7b1f1
CH
1507static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1508{
1509 srp_handle_qp_err(cq, wc, "FAST REG");
1510}
1511
509c5f33
BVA
1512/*
1513 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1514 * where to start in the first element. If sg_offset_p != NULL then
1515 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1516 * byte that has not yet been mapped.
1517 */
5cfb1782 1518static int srp_map_finish_fr(struct srp_map_state *state,
1dc7b1f1 1519 struct srp_request *req,
509c5f33
BVA
1520 struct srp_rdma_ch *ch, int sg_nents,
1521 unsigned int *sg_offset_p)
5cfb1782 1522{
509c07bc 1523 struct srp_target_port *target = ch->target;
5cfb1782 1524 struct srp_device *dev = target->srp_host->srp_dev;
f7f7aab1 1525 struct ib_reg_wr wr;
5cfb1782
BVA
1526 struct srp_fr_desc *desc;
1527 u32 rkey;
f7f7aab1 1528 int n, err;
5cfb1782 1529
290081b4
BVA
1530 if (state->fr.next >= state->fr.end) {
1531 shost_printk(KERN_ERR, ch->target->scsi_host,
1532 PFX "Out of MRs (mr_per_cmd = %d)\n",
1533 ch->target->mr_per_cmd);
f731ed62 1534 return -ENOMEM;
290081b4 1535 }
f731ed62 1536
26630e8a
SG
1537 WARN_ON_ONCE(!dev->use_fast_reg);
1538
cee687b6 1539 if (sg_nents == 1 && target->global_rkey) {
509c5f33
BVA
1540 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1541
1542 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1543 sg_dma_len(state->sg) - sg_offset,
cee687b6 1544 target->global_rkey);
509c5f33
BVA
1545 if (sg_offset_p)
1546 *sg_offset_p = 0;
f7f7aab1 1547 return 1;
26630e8a
SG
1548 }
1549
509c07bc 1550 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1551 if (!desc)
1552 return -ENOMEM;
1553
1554 rkey = ib_inc_rkey(desc->mr->rkey);
1555 ib_update_fast_reg_key(desc->mr, rkey);
1556
509c5f33
BVA
1557 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1558 dev->mr_page_size);
9d8e7d0d
BVA
1559 if (unlikely(n < 0)) {
1560 srp_fr_pool_put(ch->fr_pool, &desc, 1);
509c5f33 1561 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
9d8e7d0d 1562 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
509c5f33 1563 sg_offset_p ? *sg_offset_p : -1, n);
f7f7aab1 1564 return n;
9d8e7d0d 1565 }
5cfb1782 1566
509c5f33 1567 WARN_ON_ONCE(desc->mr->length == 0);
5cfb1782 1568
1dc7b1f1
CH
1569 req->reg_cqe.done = srp_reg_mr_err_done;
1570
f7f7aab1
SG
1571 wr.wr.next = NULL;
1572 wr.wr.opcode = IB_WR_REG_MR;
1dc7b1f1 1573 wr.wr.wr_cqe = &req->reg_cqe;
f7f7aab1
SG
1574 wr.wr.num_sge = 0;
1575 wr.wr.send_flags = 0;
1576 wr.mr = desc->mr;
1577 wr.key = desc->mr->rkey;
1578 wr.access = (IB_ACCESS_LOCAL_WRITE |
1579 IB_ACCESS_REMOTE_READ |
1580 IB_ACCESS_REMOTE_WRITE);
5cfb1782 1581
f731ed62 1582 *state->fr.next++ = desc;
5cfb1782
BVA
1583 state->nmdesc++;
1584
f7f7aab1
SG
1585 srp_map_desc(state, desc->mr->iova,
1586 desc->mr->length, desc->mr->rkey);
5cfb1782 1587
71347b0c 1588 err = ib_post_send(ch->qp, &wr.wr, NULL);
509c5f33
BVA
1589 if (unlikely(err)) {
1590 WARN_ON_ONCE(err == -ENOMEM);
26630e8a 1591 return err;
509c5f33 1592 }
26630e8a 1593
f7f7aab1 1594 return n;
5cfb1782
BVA
1595}
1596
8f26c9ff 1597static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1598 struct srp_rdma_ch *ch,
52bb8c62 1599 struct scatterlist *sg)
8f26c9ff 1600{
509c07bc 1601 struct srp_target_port *target = ch->target;
8f26c9ff 1602 struct srp_device *dev = target->srp_host->srp_dev;
a163afc8
BVA
1603 dma_addr_t dma_addr = sg_dma_address(sg);
1604 unsigned int dma_len = sg_dma_len(sg);
3ae95da8 1605 unsigned int len = 0;
8f26c9ff
DD
1606 int ret;
1607
3ae95da8 1608 WARN_ON_ONCE(!dma_len);
f5358a17 1609
8f26c9ff 1610 while (dma_len) {
5cfb1782 1611 unsigned offset = dma_addr & ~dev->mr_page_mask;
681cc360
BVA
1612
1613 if (state->npages == dev->max_pages_per_mr ||
1614 (state->npages > 0 && offset != 0)) {
f7f7aab1 1615 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1616 if (ret)
1617 return ret;
8f26c9ff
DD
1618 }
1619
5cfb1782 1620 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1621
8f26c9ff
DD
1622 if (!state->npages)
1623 state->base_dma_addr = dma_addr;
5cfb1782 1624 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1625 state->dma_len += len;
8f26c9ff
DD
1626 dma_addr += len;
1627 dma_len -= len;
1628 }
1629
5cfb1782 1630 /*
681cc360 1631 * If the end of the MR is not on a page boundary then we need to
8f26c9ff 1632 * close it out and start a new one -- we can only merge at page
1d3d98c4 1633 * boundaries.
8f26c9ff
DD
1634 */
1635 ret = 0;
681cc360 1636 if ((dma_addr & ~dev->mr_page_mask) != 0)
f7f7aab1 1637 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1638 return ret;
1639}
1640
26630e8a
SG
1641static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1642 struct srp_request *req, struct scatterlist *scat,
1643 int count)
76bc1e1d 1644{
76bc1e1d 1645 struct scatterlist *sg;
0e0d3a48 1646 int i, ret;
76bc1e1d 1647
26630e8a
SG
1648 state->pages = req->map_page;
1649 state->fmr.next = req->fmr_list;
509c5f33 1650 state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
26630e8a
SG
1651
1652 for_each_sg(scat, sg, count, i) {
52bb8c62 1653 ret = srp_map_sg_entry(state, ch, sg);
26630e8a
SG
1654 if (ret)
1655 return ret;
5cfb1782 1656 }
76bc1e1d 1657
f7f7aab1 1658 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1659 if (ret)
1660 return ret;
1661
26630e8a
SG
1662 return 0;
1663}
1664
1665static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1666 struct srp_request *req, struct scatterlist *scat,
1667 int count)
1668{
509c5f33
BVA
1669 unsigned int sg_offset = 0;
1670
f7f7aab1 1671 state->fr.next = req->fr_list;
509c5f33 1672 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
f7f7aab1 1673 state->sg = scat;
26630e8a 1674
3b59b7a6
BVA
1675 if (count == 0)
1676 return 0;
1677
57b0be9c 1678 while (count) {
f7f7aab1 1679 int i, n;
26630e8a 1680
509c5f33 1681 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
f7f7aab1
SG
1682 if (unlikely(n < 0))
1683 return n;
1684
57b0be9c 1685 count -= n;
f7f7aab1
SG
1686 for (i = 0; i < n; i++)
1687 state->sg = sg_next(state->sg);
1688 }
26630e8a 1689
26630e8a
SG
1690 return 0;
1691}
1692
1693static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1694 struct srp_request *req, struct scatterlist *scat,
1695 int count)
1696{
1697 struct srp_target_port *target = ch->target;
26630e8a
SG
1698 struct scatterlist *sg;
1699 int i;
1700
26630e8a 1701 for_each_sg(scat, sg, count, i) {
a163afc8 1702 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
cee687b6 1703 target->global_rkey);
0e0d3a48 1704 }
76bc1e1d 1705
26630e8a 1706 return 0;
76bc1e1d
BVA
1707}
1708
330179f2
BVA
1709/*
1710 * Register the indirect data buffer descriptor with the HCA.
1711 *
1712 * Note: since the indirect data buffer descriptor has been allocated with
1713 * kmalloc(), it is guaranteed that this buffer is a physically contiguous
1714 * memory buffer.
1715 */
1716static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1717 void **next_mr, void **end_mr, u32 idb_len,
1718 __be32 *idb_rkey)
1719{
1720 struct srp_target_port *target = ch->target;
1721 struct srp_device *dev = target->srp_host->srp_dev;
1722 struct srp_map_state state;
1723 struct srp_direct_buf idb_desc;
1724 u64 idb_pages[1];
f7f7aab1 1725 struct scatterlist idb_sg[1];
330179f2
BVA
1726 int ret;
1727
1728 memset(&state, 0, sizeof(state));
1729 memset(&idb_desc, 0, sizeof(idb_desc));
1730 state.gen.next = next_mr;
1731 state.gen.end = end_mr;
1732 state.desc = &idb_desc;
330179f2
BVA
1733 state.base_dma_addr = req->indirect_dma_addr;
1734 state.dma_len = idb_len;
f7f7aab1
SG
1735
1736 if (dev->use_fast_reg) {
1737 state.sg = idb_sg;
54f5c9c5 1738 sg_init_one(idb_sg, req->indirect_desc, idb_len);
f7f7aab1 1739 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
fc925518
CH
1740#ifdef CONFIG_NEED_SG_DMA_LENGTH
1741 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1742#endif
509c5f33 1743 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
f7f7aab1
SG
1744 if (ret < 0)
1745 return ret;
509c5f33 1746 WARN_ON_ONCE(ret < 1);
f7f7aab1
SG
1747 } else if (dev->use_fmr) {
1748 state.pages = idb_pages;
1749 state.pages[0] = (req->indirect_dma_addr &
1750 dev->mr_page_mask);
1751 state.npages = 1;
1752 ret = srp_map_finish_fmr(&state, ch);
1753 if (ret < 0)
1754 return ret;
1755 } else {
1756 return -EINVAL;
1757 }
330179f2
BVA
1758
1759 *idb_rkey = idb_desc.key;
1760
f7f7aab1 1761 return 0;
330179f2
BVA
1762}
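/*
 * Note on srp_map_idb() above: the indirect descriptor table itself is
 * registered as a single page so that the target can RDMA-read it. For
 * fast registration the kmalloc()'ed buffer is wrapped in a one-entry
 * scatterlist and its dma_address is filled in by hand (see the "hack"
 * comments), since req->indirect_dma_addr was already DMA-mapped when the
 * request ring was allocated (not shown in this part of the file).
 */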
1763
509c5f33
BVA
1764static void srp_check_mapping(struct srp_map_state *state,
1765 struct srp_rdma_ch *ch, struct srp_request *req,
1766 struct scatterlist *scat, int count)
1767{
1768 struct srp_device *dev = ch->target->srp_host->srp_dev;
1769 struct srp_fr_desc **pfr;
1770 u64 desc_len = 0, mr_len = 0;
1771 int i;
1772
1773 for (i = 0; i < state->ndesc; i++)
1774 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1775 if (dev->use_fast_reg)
1776 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1777 mr_len += (*pfr)->mr->length;
1778 else if (dev->use_fmr)
1779 for (i = 0; i < state->nmdesc; i++)
1780 mr_len += be32_to_cpu(req->indirect_desc[i].len);
1781 if (desc_len != scsi_bufflen(req->scmnd) ||
1782 mr_len > scsi_bufflen(req->scmnd))
1783 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1784 scsi_bufflen(req->scmnd), desc_len, mr_len,
1785 state->ndesc, state->nmdesc);
1786}
509c5f33 1787
77269cdf
BVA
1788/**
1789 * srp_map_data() - map SCSI data buffer onto an SRP request
1790 * @scmnd: SCSI command to map
1791 * @ch: SRP RDMA channel
1792 * @req: SRP request
1793 *
1794 * Returns the length in bytes of the SRP_CMD IU or a negative value if
882981f4
BVA
1795 * mapping failed. The size of any immediate data is not included in the
1796 * return value.
77269cdf 1797 */
509c07bc 1798static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1799 struct srp_request *req)
1800{
509c07bc 1801 struct srp_target_port *target = ch->target;
882981f4 1802 struct scatterlist *scat, *sg;
aef9ec39 1803 struct srp_cmd *cmd = req->cmd->buf;
882981f4 1804 int i, len, nents, count, ret;
85507bcc
RC
1805 struct srp_device *dev;
1806 struct ib_device *ibdev;
8f26c9ff
DD
1807 struct srp_map_state state;
1808 struct srp_indirect_buf *indirect_hdr;
882981f4 1809 u64 data_len;
330179f2
BVA
1810 u32 idb_len, table_len;
1811 __be32 idb_rkey;
8f26c9ff 1812 u8 fmt;
aef9ec39 1813
882981f4
BVA
1814 req->cmd->num_sge = 1;
1815
bb350d1d 1816 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
482fffc4 1817 return sizeof(struct srp_cmd) + cmd->add_cdb_len;
aef9ec39
RD
1818
1819 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1820 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1821 shost_printk(KERN_WARNING, target->scsi_host,
1822 PFX "Unhandled data direction %d\n",
1823 scmnd->sc_data_direction);
aef9ec39
RD
1824 return -EINVAL;
1825 }
1826
bb350d1d
FT
1827 nents = scsi_sg_count(scmnd);
1828 scat = scsi_sglist(scmnd);
882981f4 1829 data_len = scsi_bufflen(scmnd);
aef9ec39 1830
05321937 1831 dev = target->srp_host->srp_dev;
85507bcc
RC
1832 ibdev = dev->dev;
1833
1834 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1835 if (unlikely(count == 0))
1836 return -EIO;
f5358a17 1837
882981f4
BVA
1838 if (ch->use_imm_data &&
1839 count <= SRP_MAX_IMM_SGE &&
1840 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1841 scmnd->sc_data_direction == DMA_TO_DEVICE) {
1842 struct srp_imm_buf *buf;
1843 struct ib_sge *sge = &req->cmd->sge[1];
1844
1845 fmt = SRP_DATA_DESC_IMM;
1846 len = SRP_IMM_DATA_OFFSET;
1847 req->nmdesc = 0;
1848 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1849 buf->len = cpu_to_be32(data_len);
1850 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1851 for_each_sg(scat, sg, count, i) {
a163afc8
BVA
1852 sge[i].addr = sg_dma_address(sg);
1853 sge[i].length = sg_dma_len(sg);
882981f4
BVA
1854 sge[i].lkey = target->lkey;
1855 }
1856 req->cmd->num_sge += count;
1857 goto map_complete;
1858 }
1859
f5358a17 1860 fmt = SRP_DATA_DESC_DIRECT;
482fffc4
BVA
1861 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1862 sizeof(struct srp_direct_buf);
aef9ec39 1863
cee687b6 1864 if (count == 1 && target->global_rkey) {
f5358a17
RD
1865 /*
1866 * The midlayer only generated a single gather/scatter
1867 * entry, or DMA mapping coalesced everything to a
1868 * single entry. So a direct descriptor along with
1869 * the DMA MR suffices.
1870 */
482fffc4 1871 struct srp_direct_buf *buf;
aef9ec39 1872
482fffc4 1873 buf = (void *)cmd->add_data + cmd->add_cdb_len;
a163afc8 1874 buf->va = cpu_to_be64(sg_dma_address(scat));
cee687b6 1875 buf->key = cpu_to_be32(target->global_rkey);
a163afc8 1876 buf->len = cpu_to_be32(sg_dma_len(scat));
8f26c9ff 1877
52ede08f 1878 req->nmdesc = 0;
8f26c9ff
DD
1879 goto map_complete;
1880 }
1881
5cfb1782
BVA
1882 /*
1883 * We have more than one scatter/gather entry, so build our indirect
1884 * descriptor table, trying to merge as many entries as we can.
8f26c9ff 1885 */
482fffc4 1886 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
8f26c9ff 1887
c07d424d
DD
1888 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1889 target->indirect_size, DMA_TO_DEVICE);
1890
8f26c9ff 1891 memset(&state, 0, sizeof(state));
9edba790 1892 state.desc = req->indirect_desc;
26630e8a 1893 if (dev->use_fast_reg)
e012f363 1894 ret = srp_map_sg_fr(&state, ch, req, scat, count);
26630e8a 1895 else if (dev->use_fmr)
e012f363 1896 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
26630e8a 1897 else
e012f363
BVA
1898 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1899 req->nmdesc = state.nmdesc;
1900 if (ret < 0)
1901 goto unmap;
cf368713 1902
509c5f33
BVA
1903 {
1904 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1905 "Memory mapping consistency check");
1a1faf7a 1906 if (DYNAMIC_DEBUG_BRANCH(ddm))
509c5f33
BVA
1907 srp_check_mapping(&state, ch, req, scat, count);
1908 }
cf368713 1909
c07d424d
DD
1910 /* We've mapped the request, now pull as much of the indirect
1911 * descriptor table as we can into the command buffer. If this
1912 * target is not using an external indirect table, we are
1913 * guaranteed to fit into the command, as the SCSI layer won't
1914 * give us more S/G entries than we allow.
8f26c9ff 1915 */
8f26c9ff 1916 if (state.ndesc == 1) {
5cfb1782
BVA
1917 /*
1918 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1919 * so use a direct descriptor.
1920 */
482fffc4 1921 struct srp_direct_buf *buf;
cf368713 1922
482fffc4 1923 buf = (void *)cmd->add_data + cmd->add_cdb_len;
c07d424d 1924 *buf = req->indirect_desc[0];
8f26c9ff 1925 goto map_complete;
aef9ec39
RD
1926 }
1927
c07d424d
DD
1928 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1929 !target->allow_ext_sg)) {
1930 shost_printk(KERN_ERR, target->scsi_host,
1931 "Could not fit S/G list into SRP_CMD\n");
e012f363
BVA
1932 ret = -EIO;
1933 goto unmap;
c07d424d
DD
1934 }
1935
1936 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1937 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1938 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1939
1940 fmt = SRP_DATA_DESC_INDIRECT;
482fffc4
BVA
1941 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1942 sizeof(struct srp_indirect_buf);
c07d424d 1943 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1944
c07d424d
DD
1945 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1946 count * sizeof (struct srp_direct_buf));
8f26c9ff 1947
cee687b6 1948 if (!target->global_rkey) {
330179f2
BVA
1949 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1950 idb_len, &idb_rkey);
1951 if (ret < 0)
e012f363 1952 goto unmap;
330179f2
BVA
1953 req->nmdesc++;
1954 } else {
cee687b6 1955 idb_rkey = cpu_to_be32(target->global_rkey);
330179f2
BVA
1956 }
1957
c07d424d 1958 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1959 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1960 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1961 indirect_hdr->len = cpu_to_be32(state.total_len);
1962
1963 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1964 cmd->data_out_desc_cnt = count;
8f26c9ff 1965 else
c07d424d
DD
1966 cmd->data_in_desc_cnt = count;
1967
1968 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1969 DMA_TO_DEVICE);
8f26c9ff
DD
1970
1971map_complete:
aef9ec39
RD
1972 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1973 cmd->buf_fmt = fmt << 4;
1974 else
1975 cmd->buf_fmt = fmt;
1976
aef9ec39 1977 return len;
e012f363
BVA
1978
1979unmap:
1980 srp_unmap_data(scmnd, ch, req);
ffc548bb
BVA
1981 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1982 ret = -E2BIG;
e012f363 1983 return ret;
aef9ec39
RD
1984}
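/*
 * Summary of the buffer formats chosen by srp_map_data() above:
 * - SRP_DATA_DESC_IMM: write data small enough to be carried inside the
 *   SRP_CMD IU, if the target granted immediate data support at login;
 * - SRP_DATA_DESC_DIRECT: a single mapped sg entry combined with the
 *   global rkey, or an sg list that memory registration collapsed into a
 *   single descriptor;
 * - SRP_DATA_DESC_INDIRECT: everything else; up to cmd_sg_cnt descriptors
 *   are copied into the IU and the full table is exposed to the target
 *   through indirect_hdr->table_desc for an RDMA read.
 */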
1985
76c75b25
BVA
1986/*
1987 * Return an IU and possible credit to the free pool
1988 */
509c07bc 1989static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1990 enum srp_iu_type iu_type)
1991{
1992 unsigned long flags;
1993
509c07bc
BVA
1994 spin_lock_irqsave(&ch->lock, flags);
1995 list_add(&iu->list, &ch->free_tx);
76c75b25 1996 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1997 ++ch->req_lim;
1998 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1999}
2000
05a1d750 2001/*
509c07bc 2002 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 2003 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
2004 *
2005 * Note:
2006 * An upper limit for the number of allocated information units for each
2007 * request type is:
2008 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
2009 * more than Scsi_Host.can_queue requests.
2010 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
2011 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
2012 * one unanswered SRP request to an initiator.
2013 */
509c07bc 2014static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
2015 enum srp_iu_type iu_type)
2016{
509c07bc 2017 struct srp_target_port *target = ch->target;
05a1d750
DD
2018 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
2019 struct srp_iu *iu;
2020
93c76dbb
BVA
2021 lockdep_assert_held(&ch->lock);
2022
1dc7b1f1 2023 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 2024
509c07bc 2025 if (list_empty(&ch->free_tx))
05a1d750
DD
2026 return NULL;
2027
2028 /* Initiator responses to target requests do not consume credits */
76c75b25 2029 if (iu_type != SRP_IU_RSP) {
509c07bc 2030 if (ch->req_lim <= rsv) {
76c75b25
BVA
2031 ++target->zero_req_lim;
2032 return NULL;
2033 }
2034
509c07bc 2035 --ch->req_lim;
05a1d750
DD
2036 }
2037
509c07bc 2038 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 2039 list_del(&iu->list);
05a1d750
DD
2040 return iu;
2041}
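/*
 * Illustrative credit accounting for __srp_get_tx_iu() above (values made
 * up, and assuming SRP_TSK_MGMT_SQ_SIZE is 1): with req_lim = 3, an
 * SRP_IU_CMD allocation succeeds while req_lim >= 2 and decrements
 * req_lim, an SRP_IU_TSK_MGMT allocation may consume the last credit,
 * and an SRP_IU_RSP allocation never touches req_lim at all.
 */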
2042
9294000d
BVA
2043/*
2044 * Note: if this function is called from inside ib_drain_sq() then it will
2045 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
2046 * with status IB_WC_SUCCESS then that's a bug.
2047 */
1dc7b1f1
CH
2048static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
2049{
2050 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2051 struct srp_rdma_ch *ch = cq->cq_context;
2052
2053 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2054 srp_handle_qp_err(cq, wc, "SEND");
2055 return;
2056 }
2057
93c76dbb
BVA
2058 lockdep_assert_held(&ch->lock);
2059
1dc7b1f1
CH
2060 list_add(&iu->list, &ch->free_tx);
2061}
2062
882981f4
BVA
2063/**
2064 * srp_post_send() - send an SRP information unit
2065 * @ch: RDMA channel over which to send the information unit.
2066 * @iu: Information unit to send.
2067 * @len: Length of the information unit excluding immediate data.
2068 */
509c07bc 2069static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 2070{
509c07bc 2071 struct srp_target_port *target = ch->target;
71347b0c 2072 struct ib_send_wr wr;
05a1d750 2073
882981f4
BVA
2074 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
2075 return -EINVAL;
2076
2077 iu->sge[0].addr = iu->dma;
2078 iu->sge[0].length = len;
2079 iu->sge[0].lkey = target->lkey;
05a1d750 2080
1dc7b1f1
CH
2081 iu->cqe.done = srp_send_done;
2082
05a1d750 2083 wr.next = NULL;
1dc7b1f1 2084 wr.wr_cqe = &iu->cqe;
882981f4
BVA
2085 wr.sg_list = &iu->sge[0];
2086 wr.num_sge = iu->num_sge;
05a1d750
DD
2087 wr.opcode = IB_WR_SEND;
2088 wr.send_flags = IB_SEND_SIGNALED;
2089
71347b0c 2090 return ib_post_send(ch->qp, &wr, NULL);
05a1d750
DD
2091}
2092
509c07bc 2093static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 2094{
509c07bc 2095 struct srp_target_port *target = ch->target;
71347b0c 2096 struct ib_recv_wr wr;
dcb4cb85 2097 struct ib_sge list;
c996bb47
BVA
2098
2099 list.addr = iu->dma;
2100 list.length = iu->size;
9af76271 2101 list.lkey = target->lkey;
c996bb47 2102
1dc7b1f1
CH
2103 iu->cqe.done = srp_recv_done;
2104
c996bb47 2105 wr.next = NULL;
1dc7b1f1 2106 wr.wr_cqe = &iu->cqe;
c996bb47
BVA
2107 wr.sg_list = &list;
2108 wr.num_sge = 1;
2109
71347b0c 2110 return ib_post_recv(ch->qp, &wr, NULL);
c996bb47
BVA
2111}
2112
509c07bc 2113static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 2114{
509c07bc 2115 struct srp_target_port *target = ch->target;
aef9ec39
RD
2116 struct srp_request *req;
2117 struct scsi_cmnd *scmnd;
2118 unsigned long flags;
aef9ec39 2119
aef9ec39 2120 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
2121 spin_lock_irqsave(&ch->lock, flags);
2122 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
0a6fdbde
BVA
2123 if (rsp->tag == ch->tsk_mgmt_tag) {
2124 ch->tsk_mgmt_status = -1;
2125 if (be32_to_cpu(rsp->resp_data_len) >= 4)
2126 ch->tsk_mgmt_status = rsp->data[3];
2127 complete(&ch->tsk_mgmt_done);
2128 } else {
2129 shost_printk(KERN_ERR, target->scsi_host,
2130 "Received tsk mgmt response too late for tag %#llx\n",
2131 rsp->tag);
2132 }
509c07bc 2133 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2134 } else {
77f2c1a4 2135 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
6cb72bc1 2136 if (scmnd && scmnd->host_scribble) {
77f2c1a4
BVA
2137 req = (void *)scmnd->host_scribble;
2138 scmnd = srp_claim_req(ch, req, NULL, scmnd);
6cb72bc1
BVA
2139 } else {
2140 scmnd = NULL;
77f2c1a4 2141 }
22032991 2142 if (!scmnd) {
7aa54bd7 2143 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
2144 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
2145 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 2146
509c07bc
BVA
2147 spin_lock_irqsave(&ch->lock, flags);
2148 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
2149 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
2150
2151 return;
2152 }
aef9ec39
RD
2153 scmnd->result = rsp->status;
2154
2155 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
2156 memcpy(scmnd->sense_buffer, rsp->data +
2157 be32_to_cpu(rsp->resp_data_len),
2158 min_t(int, be32_to_cpu(rsp->sense_data_len),
2159 SCSI_SENSE_BUFFERSIZE));
2160 }
2161
e714531a 2162 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 2163 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
2164 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
2165 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
2166 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
2167 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
2168 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
2169 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 2170
509c07bc 2171 srp_free_req(ch, req, scmnd,
22032991
BVA
2172 be32_to_cpu(rsp->req_lim_delta));
2173
f8b6e31e
DD
2174 scmnd->host_scribble = NULL;
2175 scmnd->scsi_done(scmnd);
aef9ec39 2176 }
aef9ec39
RD
2177}
2178
509c07bc 2179static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
2180 void *rsp, int len)
2181{
509c07bc 2182 struct srp_target_port *target = ch->target;
76c75b25 2183 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
2184 unsigned long flags;
2185 struct srp_iu *iu;
76c75b25 2186 int err;
bb12588a 2187
509c07bc
BVA
2188 spin_lock_irqsave(&ch->lock, flags);
2189 ch->req_lim += req_delta;
2190 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2191 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 2192
bb12588a
DD
2193 if (!iu) {
2194 shost_printk(KERN_ERR, target->scsi_host, PFX
2195 "no IU available to send response\n");
76c75b25 2196 return 1;
bb12588a
DD
2197 }
2198
882981f4 2199 iu->num_sge = 1;
bb12588a
DD
2200 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2201 memcpy(iu->buf, rsp, len);
2202 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2203
509c07bc 2204 err = srp_post_send(ch, iu, len);
76c75b25 2205 if (err) {
bb12588a
DD
2206 shost_printk(KERN_ERR, target->scsi_host, PFX
2207 "unable to post response: %d\n", err);
509c07bc 2208 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 2209 }
bb12588a 2210
bb12588a
DD
2211 return err;
2212}
2213
509c07bc 2214static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
2215 struct srp_cred_req *req)
2216{
2217 struct srp_cred_rsp rsp = {
2218 .opcode = SRP_CRED_RSP,
2219 .tag = req->tag,
2220 };
2221 s32 delta = be32_to_cpu(req->req_lim_delta);
2222
509c07bc
BVA
2223 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2224 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
2225 "problems processing SRP_CRED_REQ\n");
2226}
2227
509c07bc 2228static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
2229 struct srp_aer_req *req)
2230{
509c07bc 2231 struct srp_target_port *target = ch->target;
bb12588a
DD
2232 struct srp_aer_rsp rsp = {
2233 .opcode = SRP_AER_RSP,
2234 .tag = req->tag,
2235 };
2236 s32 delta = be32_to_cpu(req->req_lim_delta);
2237
2238 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 2239 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 2240
509c07bc 2241 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
2242 shost_printk(KERN_ERR, target->scsi_host, PFX
2243 "problems processing SRP_AER_REQ\n");
2244}
2245
1dc7b1f1 2246static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 2247{
1dc7b1f1
CH
2248 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2249 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 2250 struct srp_target_port *target = ch->target;
dcb4cb85 2251 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 2252 int res;
aef9ec39
RD
2253 u8 opcode;
2254
1dc7b1f1
CH
2255 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2256 srp_handle_qp_err(cq, wc, "RECV");
2257 return;
2258 }
2259
509c07bc 2260 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2261 DMA_FROM_DEVICE);
aef9ec39
RD
2262
2263 opcode = *(u8 *) iu->buf;
2264
2265 if (0) {
7aa54bd7
DD
2266 shost_printk(KERN_ERR, target->scsi_host,
2267 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
2268 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2269 iu->buf, wc->byte_len, true);
aef9ec39
RD
2270 }
2271
2272 switch (opcode) {
2273 case SRP_RSP:
509c07bc 2274 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
2275 break;
2276
bb12588a 2277 case SRP_CRED_REQ:
509c07bc 2278 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
2279 break;
2280
2281 case SRP_AER_REQ:
509c07bc 2282 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
2283 break;
2284
aef9ec39
RD
2285 case SRP_T_LOGOUT:
2286 /* XXX Handle target logout */
7aa54bd7
DD
2287 shost_printk(KERN_WARNING, target->scsi_host,
2288 PFX "Got target logout request\n");
aef9ec39
RD
2289 break;
2290
2291 default:
7aa54bd7
DD
2292 shost_printk(KERN_WARNING, target->scsi_host,
2293 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
2294 break;
2295 }
2296
509c07bc 2297 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2298 DMA_FROM_DEVICE);
c996bb47 2299
509c07bc 2300 res = srp_post_recv(ch, iu);
c996bb47
BVA
2301 if (res != 0)
2302 shost_printk(KERN_ERR, target->scsi_host,
2303 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
2304}
2305
c1120f89
BVA
2306/**
2307 * srp_tl_err_work() - handle a transport layer error
af24663b 2308 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
2309 *
2310 * Note: This function may get invoked before the rport has been created,
2311 * hence the target->rport test.
2312 */
2313static void srp_tl_err_work(struct work_struct *work)
2314{
2315 struct srp_target_port *target;
2316
2317 target = container_of(work, struct srp_target_port, tl_err_work);
2318 if (target->rport)
2319 srp_start_tl_fail_timers(target->rport);
2320}
2321
1dc7b1f1
CH
2322static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2323 const char *opname)
948d1e88 2324{
1dc7b1f1 2325 struct srp_rdma_ch *ch = cq->cq_context;
7dad6b2e
BVA
2326 struct srp_target_port *target = ch->target;
2327
c014c8cd 2328 if (ch->connected && !target->qp_in_error) {
1dc7b1f1
CH
2329 shost_printk(KERN_ERR, target->scsi_host,
2330 PFX "failed %s status %s (%d) for CQE %p\n",
2331 opname, ib_wc_status_msg(wc->status), wc->status,
2332 wc->wr_cqe);
c1120f89 2333 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2334 }
948d1e88
BVA
2335 target->qp_in_error = true;
2336}
2337
76c75b25 2338static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2339{
76c75b25 2340 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2341 struct srp_rport *rport = target->rport;
509c07bc 2342 struct srp_rdma_ch *ch;
aef9ec39
RD
2343 struct srp_request *req;
2344 struct srp_iu *iu;
2345 struct srp_cmd *cmd;
85507bcc 2346 struct ib_device *dev;
76c75b25 2347 unsigned long flags;
77f2c1a4
BVA
2348 u32 tag;
2349 u16 idx;
d1b4289e 2350 int len, ret;
a95cadb9
BVA
2351 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2352
2353 /*
2354 * The SCSI EH thread is the only context from which srp_queuecommand()
2355 * can get invoked for blocked devices (SDEV_BLOCK /
2356 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2357 * locking the rport mutex if invoked from inside the SCSI EH.
2358 */
2359 if (in_scsi_eh)
2360 mutex_lock(&rport->mutex);
aef9ec39 2361
d1b4289e
BVA
2362 scmnd->result = srp_chkready(target->rport);
2363 if (unlikely(scmnd->result))
2364 goto err;
2ce19e72 2365
77f2c1a4
BVA
2366 WARN_ON_ONCE(scmnd->request->tag < 0);
2367 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2368 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2369 idx = blk_mq_unique_tag_to_tag(tag);
2370 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2371 dev_name(&shost->shost_gendev), tag, idx,
2372 target->req_ring_size);
509c07bc
BVA
2373
2374 spin_lock_irqsave(&ch->lock, flags);
2375 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2376 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2377
77f2c1a4
BVA
2378 if (!iu)
2379 goto err;
2380
2381 req = &ch->req_ring[idx];
05321937 2382 dev = target->srp_host->srp_dev->dev;
513d5647 2383 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
85507bcc 2384 DMA_TO_DEVICE);
aef9ec39 2385
f8b6e31e 2386 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2387
2388 cmd = iu->buf;
2389 memset(cmd, 0, sizeof *cmd);
2390
2391 cmd->opcode = SRP_CMD;
985aa495 2392 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2393 cmd->tag = tag;
aef9ec39 2394 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
482fffc4
BVA
2395 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2396 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2397 4);
2398 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2399 goto err_iu;
2400 }
aef9ec39 2401
aef9ec39
RD
2402 req->scmnd = scmnd;
2403 req->cmd = iu;
aef9ec39 2404
509c07bc 2405 len = srp_map_data(scmnd, ch, req);
aef9ec39 2406 if (len < 0) {
7aa54bd7 2407 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2408 PFX "Failed to map data (%d)\n", len);
2409 /*
2410 * If we ran out of memory descriptors (-ENOMEM) because an
2411 * application is queuing many requests with more than
52ede08f 2412 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2413 * to reduce queue depth temporarily.
2414 */
2415 scmnd->result = len == -ENOMEM ?
2416 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2417 goto err_iu;
aef9ec39
RD
2418 }
2419
513d5647 2420 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
85507bcc 2421 DMA_TO_DEVICE);
aef9ec39 2422
509c07bc 2423 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2424 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2ee00f6a 2425 scmnd->result = DID_ERROR << 16;
aef9ec39
RD
2426 goto err_unmap;
2427 }
2428
d1b4289e
BVA
2429 ret = 0;
2430
a95cadb9
BVA
2431unlock_rport:
2432 if (in_scsi_eh)
2433 mutex_unlock(&rport->mutex);
2434
d1b4289e 2435 return ret;
aef9ec39
RD
2436
2437err_unmap:
509c07bc 2438 srp_unmap_data(scmnd, ch, req);
aef9ec39 2439
76c75b25 2440err_iu:
509c07bc 2441 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2442
024ca901
BVA
2443 /*
2444 * Ensure that the loops that iterate over the request ring cannot
2445 * encounter a dangling SCSI command pointer.
2446 */
2447 req->scmnd = NULL;
2448
d1b4289e
BVA
2449err:
2450 if (scmnd->result) {
2451 scmnd->scsi_done(scmnd);
2452 ret = 0;
2453 } else {
2454 ret = SCSI_MLQUEUE_HOST_BUSY;
2455 }
a95cadb9 2456
d1b4289e 2457 goto unlock_rport;
aef9ec39
RD
2458}
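/*
 * Note on srp_queuecommand() above: the unique block layer tag is split in
 * two; blk_mq_unique_tag_to_hwq() selects the RDMA channel and
 * blk_mq_unique_tag_to_tag() selects the slot in that channel's request
 * ring, so a free struct srp_request is found without searching under a
 * lock.
 */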
2459
4d73f95f
BVA
2460/*
2461 * Note: the resources allocated in this function are freed in
509c07bc 2462 * srp_free_ch_ib().
4d73f95f 2463 */
509c07bc 2464static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2465{
509c07bc 2466 struct srp_target_port *target = ch->target;
aef9ec39
RD
2467 int i;
2468
509c07bc
BVA
2469 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2470 GFP_KERNEL);
2471 if (!ch->rx_ring)
4d73f95f 2472 goto err_no_ring;
509c07bc
BVA
2473 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2474 GFP_KERNEL);
2475 if (!ch->tx_ring)
4d73f95f
BVA
2476 goto err_no_ring;
2477
2478 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2479 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2480 ch->max_ti_iu_len,
2481 GFP_KERNEL, DMA_FROM_DEVICE);
2482 if (!ch->rx_ring[i])
aef9ec39
RD
2483 goto err;
2484 }
2485
4d73f95f 2486 for (i = 0; i < target->queue_size; ++i) {
509c07bc 2487 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
513d5647 2488 ch->max_it_iu_len,
509c07bc
BVA
2489 GFP_KERNEL, DMA_TO_DEVICE);
2490 if (!ch->tx_ring[i])
aef9ec39 2491 goto err;
dcb4cb85 2492
509c07bc 2493 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2494 }
2495
2496 return 0;
2497
2498err:
4d73f95f 2499 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2500 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2501 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2502 }
2503
4d73f95f
BVA
2504
2505err_no_ring:
509c07bc
BVA
2506 kfree(ch->tx_ring);
2507 ch->tx_ring = NULL;
2508 kfree(ch->rx_ring);
2509 ch->rx_ring = NULL;
4d73f95f 2510
aef9ec39
RD
2511 return -ENOMEM;
2512}
2513
c9b03c1a
BVA
2514static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2515{
2516 uint64_t T_tr_ns, max_compl_time_ms;
2517 uint32_t rq_tmo_jiffies;
2518
2519 /*
2520 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2521 * table 91), both the QP timeout and the retry count have to be set
2522 * for RC QP's during the RTR to RTS transition.
2523 */
2524 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2525 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2526
2527 /*
2528 * Set target->rq_tmo_jiffies to one second more than the largest time
2529 * it can take before an error completion is generated. See also
2530 * C9-140..142 in the IBTA spec for more information about how to
2531 * convert the QP Local ACK Timeout value to nanoseconds.
2532 */
2533 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2534 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2535 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2536 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2537
2538 return rq_tmo_jiffies;
2539}
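/*
 * Worked example for srp_compute_rq_tmo() above, with illustrative values:
 * for qp_attr->timeout = 14 and qp_attr->retry_cnt = 7, T_tr_ns =
 * 4096 * 2^14 ns (about 67.1 ms), the worst-case completion time is
 * 7 * 4 * 67.1 ms = 1879 ms, and the returned value is
 * msecs_to_jiffies(1879 + 1000), i.e. roughly 2.9 seconds.
 */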
2540
961e0be8 2541static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2542 const struct srp_login_rsp *lrsp,
509c07bc 2543 struct srp_rdma_ch *ch)
961e0be8 2544{
509c07bc 2545 struct srp_target_port *target = ch->target;
961e0be8
DD
2546 struct ib_qp_attr *qp_attr = NULL;
2547 int attr_mask = 0;
19f31343 2548 int ret = 0;
961e0be8
DD
2549 int i;
2550
2551 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2552 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2553 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
882981f4
BVA
2554 ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
2555 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2556 ch->use_imm_data);
513d5647
BVA
2557 WARN_ON_ONCE(ch->max_it_iu_len >
2558 be32_to_cpu(lrsp->max_it_iu_len));
961e0be8 2559
882981f4
BVA
2560 if (ch->use_imm_data)
2561 shost_printk(KERN_DEBUG, target->scsi_host,
2562 PFX "using immediate data\n");
961e0be8
DD
2563
2564 /*
2565 * Reserve credits for task management so we don't
2566 * bounce requests back to the SCSI mid-layer.
2567 */
2568 target->scsi_host->can_queue
509c07bc 2569 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2570 target->scsi_host->can_queue);
4d73f95f
BVA
2571 target->scsi_host->cmd_per_lun
2572 = min_t(int, target->scsi_host->can_queue,
2573 target->scsi_host->cmd_per_lun);
961e0be8
DD
2574 } else {
2575 shost_printk(KERN_WARNING, target->scsi_host,
2576 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2577 ret = -ECONNRESET;
2578 goto error;
2579 }
2580
509c07bc
BVA
2581 if (!ch->rx_ring) {
2582 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2583 if (ret)
2584 goto error;
2585 }
2586
4d73f95f 2587 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2588 struct srp_iu *iu = ch->rx_ring[i];
2589
2590 ret = srp_post_recv(ch, iu);
961e0be8 2591 if (ret)
19f31343 2592 goto error;
961e0be8
DD
2593 }
2594
19f31343
BVA
2595 if (!target->using_rdma_cm) {
2596 ret = -ENOMEM;
2597 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2598 if (!qp_attr)
2599 goto error;
2600
2601 qp_attr->qp_state = IB_QPS_RTR;
2602 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2603 if (ret)
2604 goto error_free;
2605
2606 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2607 if (ret)
2608 goto error_free;
961e0be8 2609
19f31343
BVA
2610 qp_attr->qp_state = IB_QPS_RTS;
2611 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2612 if (ret)
2613 goto error_free;
c9b03c1a 2614
19f31343 2615 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
961e0be8 2616
19f31343
BVA
2617 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2618 if (ret)
2619 goto error_free;
2620
2621 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2622 }
961e0be8
DD
2623
2624error_free:
2625 kfree(qp_attr);
2626
2627error:
509c07bc 2628 ch->status = ret;
961e0be8
DD
2629}
2630
19f31343 2631static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
e7ff98ae 2632 const struct ib_cm_event *event,
19f31343 2633 struct srp_rdma_ch *ch)
aef9ec39 2634{
509c07bc 2635 struct srp_target_port *target = ch->target;
7aa54bd7 2636 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2637 struct ib_class_port_info *cpi;
2638 int opcode;
19f31343 2639 u16 dlid;
aef9ec39
RD
2640
2641 switch (event->param.rej_rcvd.reason) {
2642 case IB_CM_REJ_PORT_CM_REDIRECT:
2643 cpi = event->param.rej_rcvd.ari;
19f31343
BVA
2644 dlid = be16_to_cpu(cpi->redirect_lid);
2645 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2646 ch->ib_cm.path.pkey = cpi->redirect_pkey;
aef9ec39 2647 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
19f31343 2648 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2649
19f31343 2650 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
aef9ec39
RD
2651 break;
2652
2653 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2654 if (srp_target_is_topspin(target)) {
19f31343
BVA
2655 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2656
aef9ec39
RD
2657 /*
2658 * Topspin/Cisco SRP gateways incorrectly send
2659 * reject reason code 25 when they mean 24
2660 * (port redirect).
2661 */
19f31343 2662 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
aef9ec39 2663
7aa54bd7
DD
2664 shost_printk(KERN_DEBUG, shost,
2665 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
19f31343
BVA
2666 be64_to_cpu(dgid->global.subnet_prefix),
2667 be64_to_cpu(dgid->global.interface_id));
aef9ec39 2668
509c07bc 2669 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2670 } else {
7aa54bd7
DD
2671 shost_printk(KERN_WARNING, shost,
2672 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2673 ch->status = -ECONNRESET;
aef9ec39
RD
2674 }
2675 break;
2676
2677 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2678 shost_printk(KERN_WARNING, shost,
2679 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2680 ch->status = -ECONNRESET;
aef9ec39
RD
2681 break;
2682
2683 case IB_CM_REJ_CONSUMER_DEFINED:
2684 opcode = *(u8 *) event->private_data;
2685 if (opcode == SRP_LOGIN_REJ) {
2686 struct srp_login_rej *rej = event->private_data;
2687 u32 reason = be32_to_cpu(rej->reason);
2688
2689 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2690 shost_printk(KERN_WARNING, shost,
2691 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2692 else
e7ffde01
BVA
2693 shost_printk(KERN_WARNING, shost, PFX
2694 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000 2695 target->sgid.raw,
19f31343
BVA
2696 target->ib_cm.orig_dgid.raw,
2697 reason);
aef9ec39 2698 } else
7aa54bd7
DD
2699 shost_printk(KERN_WARNING, shost,
2700 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2701 " opcode 0x%02x\n", opcode);
509c07bc 2702 ch->status = -ECONNRESET;
aef9ec39
RD
2703 break;
2704
9fe4bcf4
DD
2705 case IB_CM_REJ_STALE_CONN:
2706 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2707 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2708 break;
2709
aef9ec39 2710 default:
7aa54bd7
DD
2711 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2712 event->param.rej_rcvd.reason);
509c07bc 2713 ch->status = -ECONNRESET;
aef9ec39
RD
2714 }
2715}
2716
e7ff98ae
PP
2717static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2718 const struct ib_cm_event *event)
aef9ec39 2719{
509c07bc
BVA
2720 struct srp_rdma_ch *ch = cm_id->context;
2721 struct srp_target_port *target = ch->target;
aef9ec39 2722 int comp = 0;
aef9ec39
RD
2723
2724 switch (event->event) {
2725 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2726 shost_printk(KERN_DEBUG, target->scsi_host,
2727 PFX "Sending CM REQ failed\n");
aef9ec39 2728 comp = 1;
509c07bc 2729 ch->status = -ECONNRESET;
aef9ec39
RD
2730 break;
2731
2732 case IB_CM_REP_RECEIVED:
2733 comp = 1;
509c07bc 2734 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2735 break;
2736
2737 case IB_CM_REJ_RECEIVED:
7aa54bd7 2738 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2739 comp = 1;
2740
19f31343 2741 srp_ib_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2742 break;
2743
b7ac4ab4 2744 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2745 shost_printk(KERN_WARNING, target->scsi_host,
2746 PFX "DREQ received - connection closed\n");
c014c8cd 2747 ch->connected = false;
b7ac4ab4 2748 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2749 shost_printk(KERN_ERR, target->scsi_host,
2750 PFX "Sending CM DREP failed\n");
c1120f89 2751 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2752 break;
2753
2754 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2755 shost_printk(KERN_ERR, target->scsi_host,
2756 PFX "connection closed\n");
ac72d766 2757 comp = 1;
aef9ec39 2758
509c07bc 2759 ch->status = 0;
aef9ec39
RD
2760 break;
2761
b7ac4ab4
IR
2762 case IB_CM_MRA_RECEIVED:
2763 case IB_CM_DREQ_ERROR:
2764 case IB_CM_DREP_RECEIVED:
2765 break;
2766
aef9ec39 2767 default:
7aa54bd7
DD
2768 shost_printk(KERN_WARNING, target->scsi_host,
2769 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2770 break;
2771 }
2772
2773 if (comp)
509c07bc 2774 complete(&ch->done);
aef9ec39 2775
aef9ec39
RD
2776 return 0;
2777}
2778
19f31343
BVA
2779static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2780 struct rdma_cm_event *event)
2781{
2782 struct srp_target_port *target = ch->target;
2783 struct Scsi_Host *shost = target->scsi_host;
2784 int opcode;
2785
2786 switch (event->status) {
2787 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2788 shost_printk(KERN_WARNING, shost,
2789 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2790 ch->status = -ECONNRESET;
2791 break;
2792
2793 case IB_CM_REJ_CONSUMER_DEFINED:
2794 opcode = *(u8 *) event->param.conn.private_data;
2795 if (opcode == SRP_LOGIN_REJ) {
2796 struct srp_login_rej *rej =
2797 (struct srp_login_rej *)
2798 event->param.conn.private_data;
2799 u32 reason = be32_to_cpu(rej->reason);
2800
2801 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2802 shost_printk(KERN_WARNING, shost,
2803 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2804 else
2805 shost_printk(KERN_WARNING, shost,
2806 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2807 } else {
2808 shost_printk(KERN_WARNING, shost,
2809 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2810 opcode);
2811 }
2812 ch->status = -ECONNRESET;
2813 break;
2814
2815 case IB_CM_REJ_STALE_CONN:
2816 shost_printk(KERN_WARNING, shost,
2817 " REJ reason: stale connection\n");
2818 ch->status = SRP_STALE_CONN;
2819 break;
2820
2821 default:
2822 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2823 event->status);
2824 ch->status = -ECONNRESET;
2825 break;
2826 }
2827}
2828
2829static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2830 struct rdma_cm_event *event)
2831{
2832 struct srp_rdma_ch *ch = cm_id->context;
2833 struct srp_target_port *target = ch->target;
2834 int comp = 0;
2835
2836 switch (event->event) {
2837 case RDMA_CM_EVENT_ADDR_RESOLVED:
2838 ch->status = 0;
2839 comp = 1;
2840 break;
2841
2842 case RDMA_CM_EVENT_ADDR_ERROR:
2843 ch->status = -ENXIO;
2844 comp = 1;
2845 break;
2846
2847 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2848 ch->status = 0;
2849 comp = 1;
2850 break;
2851
2852 case RDMA_CM_EVENT_ROUTE_ERROR:
2853 case RDMA_CM_EVENT_UNREACHABLE:
2854 ch->status = -EHOSTUNREACH;
2855 comp = 1;
2856 break;
2857
2858 case RDMA_CM_EVENT_CONNECT_ERROR:
2859 shost_printk(KERN_DEBUG, target->scsi_host,
2860 PFX "Sending CM REQ failed\n");
2861 comp = 1;
2862 ch->status = -ECONNRESET;
2863 break;
2864
2865 case RDMA_CM_EVENT_ESTABLISHED:
2866 comp = 1;
2867 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2868 break;
2869
2870 case RDMA_CM_EVENT_REJECTED:
2871 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2872 comp = 1;
2873
2874 srp_rdma_cm_rej_handler(ch, event);
2875 break;
2876
2877 case RDMA_CM_EVENT_DISCONNECTED:
2878 if (ch->connected) {
2879 shost_printk(KERN_WARNING, target->scsi_host,
2880 PFX "received DREQ\n");
2881 rdma_disconnect(ch->rdma_cm.cm_id);
2882 comp = 1;
2883 ch->status = 0;
2884 queue_work(system_long_wq, &target->tl_err_work);
2885 }
2886 break;
2887
2888 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2889 shost_printk(KERN_ERR, target->scsi_host,
2890 PFX "connection closed\n");
2891
2892 comp = 1;
2893 ch->status = 0;
2894 break;
2895
2896 default:
2897 shost_printk(KERN_WARNING, target->scsi_host,
2898 PFX "Unhandled CM event %d\n", event->event);
2899 break;
2900 }
2901
2902 if (comp)
2903 complete(&ch->done);
2904
2905 return 0;
2906}
2907
71444b97
JW
2908/**
2909 * srp_change_queue_depth - set the device queue depth
2910 * @sdev: scsi device struct
2911 * @qdepth: requested queue depth
71444b97
JW
2912 *
2913 * Returns queue depth.
2914 */
2915static int
db5ed4df 2916srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2917{
c40ecc12 2918 if (!sdev->tagged_supported)
1e6f2416 2919 qdepth = 1;
db5ed4df 2920 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2921}
2922
985aa495 2923static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
0a6fdbde 2924 u8 func, u8 *status)
aef9ec39 2925{
509c07bc 2926 struct srp_target_port *target = ch->target;
a95cadb9 2927 struct srp_rport *rport = target->rport;
19081f31 2928 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2929 struct srp_iu *iu;
2930 struct srp_tsk_mgmt *tsk_mgmt;
0a6fdbde 2931 int res;
aef9ec39 2932
c014c8cd 2933 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2934 return -1;
2935
a95cadb9 2936 /*
509c07bc 2937 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2938 * invoked while a task management function is being sent.
2939 */
2940 mutex_lock(&rport->mutex);
509c07bc
BVA
2941 spin_lock_irq(&ch->lock);
2942 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2943 spin_unlock_irq(&ch->lock);
76c75b25 2944
a95cadb9
BVA
2945 if (!iu) {
2946 mutex_unlock(&rport->mutex);
2947
76c75b25 2948 return -1;
a95cadb9 2949 }
aef9ec39 2950
882981f4
BVA
2951 iu->num_sge = 1;
2952
19081f31
DD
2953 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2954 DMA_TO_DEVICE);
aef9ec39
RD
2955 tsk_mgmt = iu->buf;
2956 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2957
2958 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2959 int_to_scsilun(lun, &tsk_mgmt->lun);
aef9ec39 2960 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2961 tsk_mgmt->task_tag = req_tag;
aef9ec39 2962
0a6fdbde
BVA
2963 spin_lock_irq(&ch->lock);
2964 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2965 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2966 spin_unlock_irq(&ch->lock);
2967
2968 init_completion(&ch->tsk_mgmt_done);
2969
19081f31
DD
2970 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2971 DMA_TO_DEVICE);
509c07bc
BVA
2972 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2973 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2974 mutex_unlock(&rport->mutex);
2975
76c75b25
BVA
2976 return -1;
2977 }
0a6fdbde
BVA
2978 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2979 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2980 if (res > 0 && status)
2981 *status = ch->tsk_mgmt_status;
a95cadb9 2982 mutex_unlock(&rport->mutex);
d945e1df 2983
0a6fdbde 2984 WARN_ON_ONCE(res < 0);
aef9ec39 2985
0a6fdbde 2986 return res > 0 ? 0 : -1;
d945e1df
RD
2987}
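/*
 * Note on srp_send_tsk_mgmt() above: the tag of a task management request
 * is generated by incrementing ch->tsk_mgmt_tag and OR-ing in
 * SRP_TAG_TSK_MGMT. srp_process_rsp() earlier in this file uses that bit
 * to tell a task management response apart from a normal SRP_RSP and
 * compares the tag against ch->tsk_mgmt_tag before completing
 * ch->tsk_mgmt_done.
 */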
2988
aef9ec39
RD
2989static int srp_abort(struct scsi_cmnd *scmnd)
2990{
d945e1df 2991 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2992 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2993 u32 tag;
d92c0da7 2994 u16 ch_idx;
509c07bc 2995 struct srp_rdma_ch *ch;
086f44f5 2996 int ret;
d945e1df 2997
7aa54bd7 2998 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2999
d92c0da7 3000 if (!req)
99b6697a 3001 return SUCCESS;
77f2c1a4 3002 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
3003 ch_idx = blk_mq_unique_tag_to_hwq(tag);
3004 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
3005 return SUCCESS;
3006 ch = &target->ch[ch_idx];
3007 if (!srp_claim_req(ch, req, NULL, scmnd))
3008 return SUCCESS;
3009 shost_printk(KERN_ERR, target->scsi_host,
3010 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 3011 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
0a6fdbde 3012 SRP_TSK_ABORT_TASK, NULL) == 0)
086f44f5 3013 ret = SUCCESS;
ed9b2264 3014 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 3015 ret = FAST_IO_FAIL;
086f44f5
BVA
3016 else
3017 ret = FAILED;
e68088e7
BVA
3018 if (ret == SUCCESS) {
3019 srp_free_req(ch, req, scmnd, 0);
3020 scmnd->result = DID_ABORT << 16;
3021 scmnd->scsi_done(scmnd);
3022 }
d945e1df 3023
086f44f5 3024 return ret;
aef9ec39
RD
3025}
3026
3027static int srp_reset_device(struct scsi_cmnd *scmnd)
3028{
d945e1df 3029 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 3030 struct srp_rdma_ch *ch;
0a6fdbde 3031 u8 status;
d945e1df 3032
7aa54bd7 3033 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 3034
d92c0da7 3035 ch = &target->ch[0];
509c07bc 3036 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
0a6fdbde 3037 SRP_TSK_LUN_RESET, &status))
d945e1df 3038 return FAILED;
0a6fdbde 3039 if (status)
d945e1df
RD
3040 return FAILED;
3041
d945e1df 3042 return SUCCESS;
aef9ec39
RD
3043}
3044
3045static int srp_reset_host(struct scsi_cmnd *scmnd)
3046{
3047 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 3048
7aa54bd7 3049 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 3050
ed9b2264 3051 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
3052}
3053
b0780ee5
BVA
3054static int srp_target_alloc(struct scsi_target *starget)
3055{
3056 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3057 struct srp_target_port *target = host_to_target(shost);
3058
3059 if (target->target_can_queue)
3060 starget->can_queue = target->target_can_queue;
3061 return 0;
3062}
3063
509c5f33
BVA
3064static int srp_slave_alloc(struct scsi_device *sdev)
3065{
3066 struct Scsi_Host *shost = sdev->host;
3067 struct srp_target_port *target = host_to_target(shost);
3068 struct srp_device *srp_dev = target->srp_host->srp_dev;
fbd36818 3069 struct ib_device *ibdev = srp_dev->dev;
509c5f33 3070
fbd36818 3071 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
509c5f33
BVA
3072 blk_queue_virt_boundary(sdev->request_queue,
3073 ~srp_dev->mr_page_mask);
3074
3075 return 0;
3076}
3077
c9b03c1a
BVA
3078static int srp_slave_configure(struct scsi_device *sdev)
3079{
3080 struct Scsi_Host *shost = sdev->host;
3081 struct srp_target_port *target = host_to_target(shost);
3082 struct request_queue *q = sdev->request_queue;
3083 unsigned long timeout;
3084
3085 if (sdev->type == TYPE_DISK) {
3086 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
3087 blk_queue_rq_timeout(q, timeout);
3088 }
3089
3090 return 0;
3091}
3092
ee959b00
TJ
3093static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
3094 char *buf)
6ecb0c84 3095{
ee959b00 3096 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3097
45c37cad 3098 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
3099}
3100
ee959b00
TJ
3101static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
3102 char *buf)
6ecb0c84 3103{
ee959b00 3104 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3105
45c37cad 3106 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
3107}
3108
ee959b00
TJ
3109static ssize_t show_service_id(struct device *dev,
3110 struct device_attribute *attr, char *buf)
6ecb0c84 3111{
ee959b00 3112 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3113
19f31343
BVA
3114 if (target->using_rdma_cm)
3115 return -ENOENT;
3116 return sprintf(buf, "0x%016llx\n",
3117 be64_to_cpu(target->ib_cm.service_id));
6ecb0c84
RD
3118}
3119
ee959b00
TJ
3120static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
3121 char *buf)
6ecb0c84 3122{
ee959b00 3123 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3124
19f31343
BVA
3125 if (target->using_rdma_cm)
3126 return -ENOENT;
3127 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
6ecb0c84
RD
3128}
3129
848b3082
BVA
3130static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
3131 char *buf)
3132{
3133 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3134
747fe000 3135 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
3136}
3137
ee959b00
TJ
3138static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
3139 char *buf)
6ecb0c84 3140{
ee959b00 3141 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 3142 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 3143
19f31343
BVA
3144 if (target->using_rdma_cm)
3145 return -ENOENT;
3146 return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
6ecb0c84
RD
3147}
3148
ee959b00
TJ
3149static ssize_t show_orig_dgid(struct device *dev,
3150 struct device_attribute *attr, char *buf)
3633b3d0 3151{
ee959b00 3152 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 3153
19f31343
BVA
3154 if (target->using_rdma_cm)
3155 return -ENOENT;
3156 return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
3633b3d0
IR
3157}
3158
89de7486
BVA
3159static ssize_t show_req_lim(struct device *dev,
3160 struct device_attribute *attr, char *buf)
3161{
3162 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
3163 struct srp_rdma_ch *ch;
3164 int i, req_lim = INT_MAX;
89de7486 3165
d92c0da7
BVA
3166 for (i = 0; i < target->ch_count; i++) {
3167 ch = &target->ch[i];
3168 req_lim = min(req_lim, ch->req_lim);
3169 }
3170 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
3171}
3172
ee959b00
TJ
3173static ssize_t show_zero_req_lim(struct device *dev,
3174 struct device_attribute *attr, char *buf)
6bfa24fa 3175{
ee959b00 3176 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 3177
6bfa24fa
RD
3178 return sprintf(buf, "%d\n", target->zero_req_lim);
3179}
3180
ee959b00
TJ
3181static ssize_t show_local_ib_port(struct device *dev,
3182 struct device_attribute *attr, char *buf)
ded7f1a1 3183{
ee959b00 3184 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
3185
3186 return sprintf(buf, "%d\n", target->srp_host->port);
3187}
3188
ee959b00
TJ
3189static ssize_t show_local_ib_device(struct device *dev,
3190 struct device_attribute *attr, char *buf)
ded7f1a1 3191{
ee959b00 3192 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 3193
6c854111
JG
3194 return sprintf(buf, "%s\n",
3195 dev_name(&target->srp_host->srp_dev->dev->dev));
ded7f1a1
IR
3196}
3197
d92c0da7
BVA
3198static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
3199 char *buf)
3200{
3201 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3202
3203 return sprintf(buf, "%d\n", target->ch_count);
3204}
3205
4b5e5f41
BVA
3206static ssize_t show_comp_vector(struct device *dev,
3207 struct device_attribute *attr, char *buf)
3208{
3209 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3210
3211 return sprintf(buf, "%d\n", target->comp_vector);
3212}
3213
7bb312e4
VP
3214static ssize_t show_tl_retry_count(struct device *dev,
3215 struct device_attribute *attr, char *buf)
3216{
3217 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3218
3219 return sprintf(buf, "%d\n", target->tl_retry_count);
3220}
3221
49248644
DD
3222static ssize_t show_cmd_sg_entries(struct device *dev,
3223 struct device_attribute *attr, char *buf)
3224{
3225 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3226
3227 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
3228}
3229
c07d424d
DD
3230static ssize_t show_allow_ext_sg(struct device *dev,
3231 struct device_attribute *attr, char *buf)
3232{
3233 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3234
3235 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3236}
3237
ee959b00
TJ
3238static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
3239static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
3240static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
3241static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 3242static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
3243static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
3244static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 3245static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
3246static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
3247static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
3248static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 3249static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 3250static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 3251static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 3252static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 3253static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
3254
3255static struct device_attribute *srp_host_attrs[] = {
3256 &dev_attr_id_ext,
3257 &dev_attr_ioc_guid,
3258 &dev_attr_service_id,
3259 &dev_attr_pkey,
848b3082 3260 &dev_attr_sgid,
ee959b00
TJ
3261 &dev_attr_dgid,
3262 &dev_attr_orig_dgid,
89de7486 3263 &dev_attr_req_lim,
ee959b00
TJ
3264 &dev_attr_zero_req_lim,
3265 &dev_attr_local_ib_port,
3266 &dev_attr_local_ib_device,
d92c0da7 3267 &dev_attr_ch_count,
4b5e5f41 3268 &dev_attr_comp_vector,
7bb312e4 3269 &dev_attr_tl_retry_count,
49248644 3270 &dev_attr_cmd_sg_entries,
c07d424d 3271 &dev_attr_allow_ext_sg,
6ecb0c84
RD
3272 NULL
3273};
3274
aef9ec39
RD
3275static struct scsi_host_template srp_template = {
3276 .module = THIS_MODULE,
b7f008fd
RD
3277 .name = "InfiniBand SRP initiator",
3278 .proc_name = DRV_NAME,
b0780ee5 3279 .target_alloc = srp_target_alloc,
509c5f33 3280 .slave_alloc = srp_slave_alloc,
c9b03c1a 3281 .slave_configure = srp_slave_configure,
aef9ec39
RD
3282 .info = srp_target_info,
3283 .queuecommand = srp_queuecommand,
71444b97 3284 .change_queue_depth = srp_change_queue_depth,
b6a05c82 3285 .eh_timed_out = srp_timed_out,
aef9ec39
RD
3286 .eh_abort_handler = srp_abort,
3287 .eh_device_reset_handler = srp_reset_device,
3288 .eh_host_reset_handler = srp_reset_host,
2742c1da 3289 .skip_settle_delay = true,
49248644 3290 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 3291 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 3292 .this_id = -1,
4d73f95f 3293 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
77f2c1a4 3294 .shost_attrs = srp_host_attrs,
c40ecc12 3295 .track_queue_depth = 1,
aef9ec39
RD
3296};
3297
34aa654e
BVA
3298static int srp_sdev_count(struct Scsi_Host *host)
3299{
3300 struct scsi_device *sdev;
3301 int c = 0;
3302
3303 shost_for_each_device(sdev, host)
3304 c++;
3305
3306 return c;
3307}
3308
bc44bd1d
BVA
3309/*
3310 * Return values:
3311 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3312 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3313 * removal has been scheduled.
3314 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3315 */
aef9ec39
RD
3316static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3317{
3236822b
FT
3318 struct srp_rport_identifiers ids;
3319 struct srp_rport *rport;
3320
34aa654e 3321 target->state = SRP_TARGET_SCANNING;
aef9ec39 3322 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 3323 be64_to_cpu(target->id_ext));
aef9ec39 3324
dee2b82a 3325 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
aef9ec39
RD
3326 return -ENODEV;
3327
3236822b
FT
3328 memcpy(ids.port_id, &target->id_ext, 8);
3329 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 3330 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
3331 rport = srp_rport_add(target->scsi_host, &ids);
3332 if (IS_ERR(rport)) {
3333 scsi_remove_host(target->scsi_host);
3334 return PTR_ERR(rport);
3335 }
3336
dc1bdbd9 3337 rport->lld_data = target;
9dd69a60 3338 target->rport = rport;
dc1bdbd9 3339
b3589fd4 3340 spin_lock(&host->target_lock);
aef9ec39 3341 list_add_tail(&target->list, &host->target_list);
b3589fd4 3342 spin_unlock(&host->target_lock);
aef9ec39 3343
aef9ec39 3344 scsi_scan_target(&target->scsi_host->shost_gendev,
1d645088 3345 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
aef9ec39 3346
c014c8cd
BVA
3347 if (srp_connected_ch(target) < target->ch_count ||
3348 target->qp_in_error) {
34aa654e
BVA
3349 shost_printk(KERN_INFO, target->scsi_host,
3350 PFX "SCSI scan failed - removing SCSI host\n");
3351 srp_queue_remove_work(target);
3352 goto out;
3353 }
3354
cf1acab7 3355 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
34aa654e
BVA
3356 dev_name(&target->scsi_host->shost_gendev),
3357 srp_sdev_count(target->scsi_host));
3358
3359 spin_lock_irq(&target->lock);
3360 if (target->state == SRP_TARGET_SCANNING)
3361 target->state = SRP_TARGET_LIVE;
3362 spin_unlock_irq(&target->lock);
3363
3364out:
aef9ec39
RD
3365 return 0;
3366}
3367
ee959b00 3368static void srp_release_dev(struct device *dev)
aef9ec39
RD
3369{
3370 struct srp_host *host =
ee959b00 3371 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3372
3373 complete(&host->released);
3374}
3375
3376static struct class srp_class = {
3377 .name = "infiniband_srp",
ee959b00 3378 .dev_release = srp_release_dev
aef9ec39
RD
3379};
3380
96fc248a
BVA
3381/**
3382 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
3383 * @host: SRP host.
3384 * @target: SRP target port.
96fc248a
BVA
3385 */
3386static bool srp_conn_unique(struct srp_host *host,
3387 struct srp_target_port *target)
3388{
3389 struct srp_target_port *t;
3390 bool ret = false;
3391
3392 if (target->state == SRP_TARGET_REMOVED)
3393 goto out;
3394
3395 ret = true;
3396
3397 spin_lock(&host->target_lock);
3398 list_for_each_entry(t, &host->target_list, list) {
3399 if (t != target &&
3400 target->id_ext == t->id_ext &&
3401 target->ioc_guid == t->ioc_guid &&
3402 target->initiator_ext == t->initiator_ext) {
3403 ret = false;
3404 break;
3405 }
3406 }
3407 spin_unlock(&host->target_lock);
3408
3409out:
3410 return ret;
3411}
3412
aef9ec39
RD
3413/*
3414 * Target ports are added by writing
3415 *
3416 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3417 * pkey=<P_Key>,service_id=<service ID>
19f31343
BVA
3418 * or
3419 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3420 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
aef9ec39
RD
3421 *
3422 * to the add_target sysfs attribute.
3423 */
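
/*
 * For illustration only (not part of this driver): a minimal user-space
 * sketch that creates a target through this interface.  The sysfs path
 * follows the "srp-<ibdev>-<port>" naming used by srp_add_port() below; the
 * device name and all identifier values are made up here - real values are
 * normally obtained from srp_daemon or ibsrpdm.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path =
 *			"/sys/class/infiniband_srp/srp-mlx5_0-1/add_target";
 *		const char *spec =
 *			"id_ext=200100a0b8000000,ioc_guid=00a0b80200401234,"
 *			"dgid=fe800000000000000002c90200401234,pkey=ffff,"
 *			"service_id=200100a0b8000000";
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0 || write(fd, spec, strlen(spec)) < 0)
 *			perror(path);
 *		if (fd >= 0)
 *			close(fd);
 *		return 0;
 *	}
 */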
3424enum {
3425 SRP_OPT_ERR = 0,
3426 SRP_OPT_ID_EXT = 1 << 0,
3427 SRP_OPT_IOC_GUID = 1 << 1,
3428 SRP_OPT_DGID = 1 << 2,
3429 SRP_OPT_PKEY = 1 << 3,
3430 SRP_OPT_SERVICE_ID = 1 << 4,
3431 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 3432 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 3433 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 3434 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 3435 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
3436 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3437 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 3438 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 3439 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 3440 SRP_OPT_QUEUE_SIZE = 1 << 14,
19f31343
BVA
3441 SRP_OPT_IP_SRC = 1 << 15,
3442 SRP_OPT_IP_DEST = 1 << 16,
b0780ee5 3443 SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
19f31343
BVA
3444};
3445
3446static unsigned int srp_opt_mandatory[] = {
3447 SRP_OPT_ID_EXT |
3448 SRP_OPT_IOC_GUID |
3449 SRP_OPT_DGID |
3450 SRP_OPT_PKEY |
3451 SRP_OPT_SERVICE_ID,
3452 SRP_OPT_ID_EXT |
3453 SRP_OPT_IOC_GUID |
3454 SRP_OPT_IP_DEST,
aef9ec39
RD
3455};
3456
a447c093 3457static const match_table_t srp_opt_tokens = {
52fb2b50
VP
3458 { SRP_OPT_ID_EXT, "id_ext=%s" },
3459 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3460 { SRP_OPT_DGID, "dgid=%s" },
3461 { SRP_OPT_PKEY, "pkey=%x" },
3462 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3463 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3464 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
b0780ee5 3465 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
0c0450db 3466 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 3467 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 3468 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
3469 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3470 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 3471 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 3472 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 3473 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
19f31343
BVA
3474 { SRP_OPT_IP_SRC, "src=%s" },
3475 { SRP_OPT_IP_DEST, "dest=%s" },
52fb2b50 3476 { SRP_OPT_ERR, NULL }
aef9ec39
RD
3477};
3478
c62adb7d
BVA
3479/**
3480 * srp_parse_in - parse an IP address and port number combination
e37df2d5
BVA
3481 * @net: [in] Network namespace.
3482 * @sa: [out] Address family, IP address and port number.
3483 * @addr_port_str: [in] IP address and port number.
c62adb7d
BVA
3484 *
3485 * Parse the following address formats:
3486 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3487 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3488 */
19f31343
BVA
3489static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3490 const char *addr_port_str)
3491{
c62adb7d
BVA
3492 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3493 char *port_str;
19f31343
BVA
3494 int ret;
3495
3496 if (!addr)
3497 return -ENOMEM;
c62adb7d
BVA
3498 port_str = strrchr(addr, ':');
3499 if (!port_str) {
	kfree(addr);
	return -EINVAL;
}
3501 *port_str++ = '\0';
3502 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3503 if (ret && addr[0]) {
3504 addr_end = addr + strlen(addr) - 1;
3505 if (addr[0] == '[' && *addr_end == ']') {
3506 *addr_end = '\0';
3507 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3508 port_str, sa);
3509 }
3510 }
19f31343 3511 kfree(addr);
c62adb7d 3512 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
19f31343
BVA
3513 return ret;
3514}
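
/*
 * For example (sketch; the addresses and port number are arbitrary), both of
 * the following calls are accepted by srp_parse_in():
 *
 *	srp_parse_in(net, &ss, "192.168.1.66:5555");			(IPv4)
 *	srp_parse_in(net, &ss, "[fe80::2:c903:4:1234%2]:5555");	(IPv6 + scope)
 */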
3515
3516static int srp_parse_options(struct net *net, const char *buf,
3517 struct srp_target_port *target)
aef9ec39
RD
3518{
3519 char *options, *sep_opt;
3520 char *p;
aef9ec39 3521 substring_t args[MAX_OPT_ARGS];
2a174df0 3522 unsigned long long ull;
aef9ec39
RD
3523 int opt_mask = 0;
3524 int token;
3525 int ret = -EINVAL;
3526 int i;
3527
3528 options = kstrdup(buf, GFP_KERNEL);
3529 if (!options)
3530 return -ENOMEM;
3531
3532 sep_opt = options;
7dcf9c19 3533 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
3534 if (!*p)
3535 continue;
3536
3537 token = match_token(p, srp_opt_tokens, args);
3538 opt_mask |= token;
	/* A parse failure below must not return a stale success code. */
	ret = -EINVAL;
3539
3540 switch (token) {
3541 case SRP_OPT_ID_EXT:
3542 p = match_strdup(args);
a20f3a6d
IR
3543 if (!p) {
3544 ret = -ENOMEM;
3545 goto out;
3546 }
2a174df0
BVA
3547 ret = kstrtoull(p, 16, &ull);
3548 if (ret) {
3549 pr_warn("invalid id_ext parameter '%s'\n", p);
3550 kfree(p);
3551 goto out;
3552 }
3553 target->id_ext = cpu_to_be64(ull);
aef9ec39
RD
3554 kfree(p);
3555 break;
3556
3557 case SRP_OPT_IOC_GUID:
3558 p = match_strdup(args);
a20f3a6d
IR
3559 if (!p) {
3560 ret = -ENOMEM;
3561 goto out;
3562 }
2a174df0
BVA
3563 ret = kstrtoull(p, 16, &ull);
3564 if (ret) {
3565 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3566 kfree(p);
3567 goto out;
3568 }
3569 target->ioc_guid = cpu_to_be64(ull);
aef9ec39
RD
3570 kfree(p);
3571 break;
3572
3573 case SRP_OPT_DGID:
3574 p = match_strdup(args);
a20f3a6d
IR
3575 if (!p) {
3576 ret = -ENOMEM;
3577 goto out;
3578 }
aef9ec39 3579 if (strlen(p) != 32) {
e0bda7d8 3580 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3581 kfree(p);
aef9ec39
RD
3582 goto out;
3583 }
3584
19f31343 3585 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
bf17c1c7 3586 kfree(p);
e711f968
AS
3587 if (ret < 0)
3588 goto out;
aef9ec39
RD
3589 break;
3590
3591 case SRP_OPT_PKEY:
3592 if (match_hex(args, &token)) {
e0bda7d8 3593 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3594 goto out;
3595 }
19f31343 3596 target->ib_cm.pkey = cpu_to_be16(token);
aef9ec39
RD
3597 break;
3598
3599 case SRP_OPT_SERVICE_ID:
3600 p = match_strdup(args);
a20f3a6d
IR
3601 if (!p) {
3602 ret = -ENOMEM;
3603 goto out;
3604 }
2a174df0
BVA
3605 ret = kstrtoull(p, 16, &ull);
3606 if (ret) {
3607 pr_warn("bad service_id parameter '%s'\n", p);
3608 kfree(p);
3609 goto out;
3610 }
19f31343
BVA
3611 target->ib_cm.service_id = cpu_to_be64(ull);
3612 kfree(p);
3613 break;
3614
3615 case SRP_OPT_IP_SRC:
3616 p = match_strdup(args);
3617 if (!p) {
3618 ret = -ENOMEM;
3619 goto out;
3620 }
3621 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
3622 if (ret < 0) {
3623 pr_warn("bad source parameter '%s'\n", p);
3624 kfree(p);
3625 goto out;
3626 }
3627 target->rdma_cm.src_specified = true;
3628 kfree(p);
3629 break;
3630
3631 case SRP_OPT_IP_DEST:
3632 p = match_strdup(args);
3633 if (!p) {
3634 ret = -ENOMEM;
3635 goto out;
3636 }
3637 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
3638 if (ret < 0) {
3639 pr_warn("bad dest parameter '%s'\n", p);
3640 kfree(p);
3641 goto out;
3642 }
3643 target->using_rdma_cm = true;
aef9ec39
RD
3644 kfree(p);
3645 break;
3646
3647 case SRP_OPT_MAX_SECT:
3648 if (match_int(args, &token)) {
e0bda7d8 3649 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3650 goto out;
3651 }
3652 target->scsi_host->max_sectors = token;
3653 break;
3654
4d73f95f
BVA
3655 case SRP_OPT_QUEUE_SIZE:
3656 if (match_int(args, &token) || token < 1) {
3657 pr_warn("bad queue_size parameter '%s'\n", p);
3658 goto out;
3659 }
3660 target->scsi_host->can_queue = token;
3661 target->queue_size = token + SRP_RSP_SQ_SIZE +
3662 SRP_TSK_MGMT_SQ_SIZE;
3663 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3664 target->scsi_host->cmd_per_lun = token;
3665 break;
3666
52fb2b50 3667 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3668 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3669 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3670 p);
52fb2b50
VP
3671 goto out;
3672 }
4d73f95f 3673 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3674 break;
3675
b0780ee5
BVA
3676 case SRP_OPT_TARGET_CAN_QUEUE:
3677 if (match_int(args, &token) || token < 1) {
3678 pr_warn("bad max target_can_queue parameter '%s'\n",
3679 p);
3680 goto out;
3681 }
3682 target->target_can_queue = token;
3683 break;
3684
0c0450db
R
3685 case SRP_OPT_IO_CLASS:
3686 if (match_hex(args, &token)) {
e0bda7d8 3687 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3688 goto out;
3689 }
3690 if (token != SRP_REV10_IB_IO_CLASS &&
3691 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3692 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3693 token, SRP_REV10_IB_IO_CLASS,
3694 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3695 goto out;
3696 }
3697 target->io_class = token;
3698 break;
3699
01cb9bcb
IR
3700 case SRP_OPT_INITIATOR_EXT:
3701 p = match_strdup(args);
a20f3a6d
IR
3702 if (!p) {
3703 ret = -ENOMEM;
3704 goto out;
3705 }
2a174df0
BVA
3706 ret = kstrtoull(p, 16, &ull);
3707 if (ret) {
3708 pr_warn("bad initiator_ext value '%s'\n", p);
3709 kfree(p);
3710 goto out;
3711 }
3712 target->initiator_ext = cpu_to_be64(ull);
01cb9bcb
IR
3713 kfree(p);
3714 break;
3715
49248644
DD
3716 case SRP_OPT_CMD_SG_ENTRIES:
3717 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3718 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3719 p);
49248644
DD
3720 goto out;
3721 }
3722 target->cmd_sg_cnt = token;
3723 break;
3724
c07d424d
DD
3725 case SRP_OPT_ALLOW_EXT_SG:
3726 if (match_int(args, &token)) {
e0bda7d8 3727 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3728 goto out;
3729 }
3730 target->allow_ext_sg = !!token;
3731 break;
3732
3733 case SRP_OPT_SG_TABLESIZE:
3734 if (match_int(args, &token) || token < 1 ||
65e8617f 3735 token > SG_MAX_SEGMENTS) {
e0bda7d8
BVA
3736 pr_warn("bad max sg_tablesize parameter '%s'\n",
3737 p);
c07d424d
DD
3738 goto out;
3739 }
3740 target->sg_tablesize = token;
3741 break;
3742
4b5e5f41
BVA
3743 case SRP_OPT_COMP_VECTOR:
3744 if (match_int(args, &token) || token < 0) {
3745 pr_warn("bad comp_vector parameter '%s'\n", p);
3746 goto out;
3747 }
3748 target->comp_vector = token;
3749 break;
3750
7bb312e4
VP
3751 case SRP_OPT_TL_RETRY_COUNT:
3752 if (match_int(args, &token) || token < 2 || token > 7) {
3753 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3754 p);
3755 goto out;
3756 }
3757 target->tl_retry_count = token;
3758 break;
3759
aef9ec39 3760 default:
e0bda7d8
BVA
3761 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3762 p);
aef9ec39
RD
3763 goto out;
3764 }
3765 }
3766
19f31343
BVA
	ret = -EINVAL;
3767 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3768 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3769 ret = 0;
3770 break;
3771 }
3772 }
3773 if (ret)
3774 pr_warn("target creation request is missing one or more parameters\n");
aef9ec39 3775
4d73f95f
BVA
3776 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3777 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3778 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3779 target->scsi_host->cmd_per_lun,
3780 target->scsi_host->can_queue);
3781
aef9ec39
RD
3782out:
3783 kfree(options);
3784 return ret;
3785}
3786
ee959b00
TJ
3787static ssize_t srp_create_target(struct device *dev,
3788 struct device_attribute *attr,
aef9ec39
RD
3789 const char *buf, size_t count)
3790{
3791 struct srp_host *host =
ee959b00 3792 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3793 struct Scsi_Host *target_host;
3794 struct srp_target_port *target;
509c07bc 3795 struct srp_rdma_ch *ch;
d1b4289e
BVA
3796 struct srp_device *srp_dev = host->srp_dev;
3797 struct ib_device *ibdev = srp_dev->dev;
d92c0da7 3798 int ret, node_idx, node, cpu, i;
509c5f33 3799 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
d92c0da7 3800 bool multich = false;
513d5647 3801 uint32_t max_iu_len;
aef9ec39
RD
3802
3803 target_host = scsi_host_alloc(&srp_template,
3804 sizeof (struct srp_target_port));
3805 if (!target_host)
3806 return -ENOMEM;
3807
49248644 3808 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3809 target_host->max_channel = 0;
3810 target_host->max_id = 1;
985aa495 3811 target_host->max_lun = -1LL;
3c8edf0e 3812 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
0b5cb330 3813 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
5f068992 3814
aef9ec39 3815 target = host_to_target(target_host);
aef9ec39 3816
19f31343 3817 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
49248644
DD
3818 target->io_class = SRP_REV16A_IB_IO_CLASS;
3819 target->scsi_host = target_host;
3820 target->srp_host = host;
e6bf5f48 3821 target->lkey = host->srp_dev->pd->local_dma_lkey;
cee687b6 3822 target->global_rkey = host->srp_dev->global_rkey;
49248644 3823 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3824 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3825 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3826 target->tl_retry_count = 7;
4d73f95f 3827 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3828
34aa654e
BVA
3829 /*
3830 * Prevent the SCSI host from being removed by srp_remove_target()
3831 * before this function returns.
3832 */
3833 scsi_host_get(target->scsi_host);
3834
4fa354c9
BVA
3835 ret = mutex_lock_interruptible(&host->add_target_mutex);
3836 if (ret < 0)
3837 goto put;
2d7091bc 3838
19f31343 3839 ret = srp_parse_options(target->net, buf, target);
aef9ec39 3840 if (ret)
fb49c8bb 3841 goto out;
aef9ec39 3842
4d73f95f
BVA
3843 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3844
96fc248a 3845 if (!srp_conn_unique(target->srp_host, target)) {
19f31343 3846 if (target->using_rdma_cm) {
19f31343 3847 shost_printk(KERN_INFO, target->scsi_host,
7da09af9 3848 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
19f31343
BVA
3849 be64_to_cpu(target->id_ext),
3850 be64_to_cpu(target->ioc_guid),
7da09af9 3851 &target->rdma_cm.dst);
19f31343
BVA
3852 } else {
3853 shost_printk(KERN_INFO, target->scsi_host,
3854 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3855 be64_to_cpu(target->id_ext),
3856 be64_to_cpu(target->ioc_guid),
3857 be64_to_cpu(target->initiator_ext));
3858 }
96fc248a 3859 ret = -EEXIST;
fb49c8bb 3860 goto out;
96fc248a
BVA
3861 }
3862
5cfb1782 3863 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3864 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3865 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3866 target->sg_tablesize = target->cmd_sg_cnt;
3867 }
3868
509c5f33 3869 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
fbd36818
SG
3870 bool gaps_reg = (ibdev->attrs.device_cap_flags &
3871 IB_DEVICE_SG_GAPS_REG);
3872
509c5f33
BVA
3873 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3874 (ilog2(srp_dev->mr_page_size) - 9);
fbd36818
SG
3875 if (!gaps_reg) {
3876 /*
3877 * FR and FMR can only map one HCA page per entry. If
3878 * the start address is not aligned on a HCA page
3879 * boundary two entries will be used for the head and
3880 * the tail although these two entries combined
3881 * contain at most one HCA page of data. Hence the "+
3882 * 1" in the calculation below.
3883 *
3884 * The indirect data buffer descriptor is contiguous
3885 * so the memory for that buffer will only be
3886 * registered if register_always is true. Hence add
3887 * one to mr_per_cmd if register_always has been set.
3888 */
3889 mr_per_cmd = register_always +
3890 (target->scsi_host->max_sectors + 1 +
3891 max_sectors_per_mr - 1) / max_sectors_per_mr;
3892 } else {
3893 mr_per_cmd = register_always +
3894 (target->sg_tablesize +
3895 srp_dev->max_pages_per_mr - 1) /
3896 srp_dev->max_pages_per_mr;
3897 }
509c5f33 3898 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
fbd36818 3899 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
509c5f33
BVA
3900 max_sectors_per_mr, mr_per_cmd);
3901 }
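
/*
 * Worked example with illustrative numbers: for mr_page_size = 4096 and
 * max_pages_per_mr = 256, max_sectors_per_mr = 256 << (12 - 9) = 2048.
 * With max_sectors = 1024, register_always = true and !gaps_reg this gives
 * mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
 */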
3902
c07d424d 3903 target_host->sg_tablesize = target->sg_tablesize;
509c5f33
BVA
3904 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3905 target->mr_per_cmd = mr_per_cmd;
c07d424d
DD
3906 target->indirect_size = target->sg_tablesize *
3907 sizeof (struct srp_direct_buf);
882981f4 3908 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
49248644 3909
c1120f89 3910 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3911 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3912 spin_lock_init(&target->lock);
1dfce294 3913 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66 3914 if (ret)
fb49c8bb 3915 goto out;
aef9ec39 3916
d92c0da7
BVA
3917 ret = -ENOMEM;
3918 target->ch_count = max_t(unsigned, num_online_nodes(),
3919 min(ch_count ? :
3920 min(4 * num_online_nodes(),
3921 ibdev->num_comp_vectors),
3922 num_online_cpus()));
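/*
 * The ch_count expression above evaluates, for example (sketch), to
 * max(2, min(min(4 * 2, 8), 16)) = 8 channels on a system with two online
 * NUMA nodes, 16 online CPUs, 8 completion vectors and the ch_count module
 * parameter left at 0.
 */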
3923 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3924 GFP_KERNEL);
3925 if (!target->ch)
fb49c8bb 3926 goto out;
aef9ec39 3927
d92c0da7
BVA
3928 node_idx = 0;
3929 for_each_online_node(node) {
3930 const int ch_start = (node_idx * target->ch_count /
3931 num_online_nodes());
3932 const int ch_end = ((node_idx + 1) * target->ch_count /
3933 num_online_nodes());
3a148896
BVA
3934 const int cv_start = node_idx * ibdev->num_comp_vectors /
3935 num_online_nodes();
3936 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3937 num_online_nodes();
d92c0da7
BVA
3938 int cpu_idx = 0;
3939
3940 for_each_online_cpu(cpu) {
3941 if (cpu_to_node(cpu) != node)
3942 continue;
3943 if (ch_start + cpu_idx >= ch_end)
3944 continue;
3945 ch = &target->ch[ch_start + cpu_idx];
3946 ch->target = target;
3947 ch->comp_vector = cv_start == cv_end ? cv_start :
3948 cv_start + cpu_idx % (cv_end - cv_start);
3949 spin_lock_init(&ch->lock);
3950 INIT_LIST_HEAD(&ch->free_tx);
3951 ret = srp_new_cm_id(ch);
3952 if (ret)
3953 goto err_disconnect;
aef9ec39 3954
d92c0da7
BVA
3955 ret = srp_create_ch_ib(ch);
3956 if (ret)
3957 goto err_disconnect;
3958
3959 ret = srp_alloc_req_data(ch);
3960 if (ret)
3961 goto err_disconnect;
3962
513d5647 3963 ret = srp_connect_ch(ch, max_iu_len, multich);
d92c0da7 3964 if (ret) {
19f31343
BVA
3965 char dst[64];
3966
3967 if (target->using_rdma_cm)
7da09af9
BVA
3968 snprintf(dst, sizeof(dst), "%pIS",
3969 &target->rdma_cm.dst);
19f31343
BVA
3970 else
3971 snprintf(dst, sizeof(dst), "%pI6",
3972 target->ib_cm.orig_dgid.raw);
d92c0da7 3973 shost_printk(KERN_ERR, target->scsi_host,
19f31343 3974 PFX "Connection %d/%d to %s failed\n",
d92c0da7 3975 ch_start + cpu_idx,
19f31343 3976 target->ch_count, dst);
d92c0da7 3977 if (node_idx == 0 && cpu_idx == 0) {
b02c1536 3978 goto free_ch;
d92c0da7
BVA
3979 } else {
3980 srp_free_ch_ib(target, ch);
3981 srp_free_req_data(target, ch);
3982 target->ch_count = ch - target->ch;
c257ea6f 3983 goto connected;
d92c0da7
BVA
3984 }
3985 }
3986
3987 multich = true;
3988 cpu_idx++;
3989 }
3990 node_idx++;
aef9ec39
RD
3991 }
3992
c257ea6f 3993connected:
d92c0da7
BVA
3994 target->scsi_host->nr_hw_queues = target->ch_count;
3995
aef9ec39
RD
3996 ret = srp_add_target(host, target);
3997 if (ret)
3998 goto err_disconnect;
3999
34aa654e 4000 if (target->state != SRP_TARGET_REMOVED) {
19f31343 4001 if (target->using_rdma_cm) {
19f31343 4002 shost_printk(KERN_DEBUG, target->scsi_host, PFX
7da09af9 4003 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
19f31343
BVA
4004 be64_to_cpu(target->id_ext),
4005 be64_to_cpu(target->ioc_guid),
7da09af9 4006 target->sgid.raw, &target->rdma_cm.dst);
19f31343
BVA
4007 } else {
4008 shost_printk(KERN_DEBUG, target->scsi_host, PFX
4009 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
4010 be64_to_cpu(target->id_ext),
4011 be64_to_cpu(target->ioc_guid),
4012 be16_to_cpu(target->ib_cm.pkey),
4013 be64_to_cpu(target->ib_cm.service_id),
4014 target->sgid.raw,
4015 target->ib_cm.orig_dgid.raw);
4016 }
34aa654e 4017 }
e7ffde01 4018
2d7091bc
BVA
4019 ret = count;
4020
4021out:
4022 mutex_unlock(&host->add_target_mutex);
34aa654e 4023
4fa354c9 4024put:
34aa654e 4025 scsi_host_put(target->scsi_host);
19f31343
BVA
4026 if (ret < 0) {
4027 /*
4028 * If a call to srp_remove_target() has not been scheduled,
4029 * drop the network namespace reference that was obtained
4030 * earlier in this function.
4031 */
4032 if (target->state != SRP_TARGET_REMOVED)
4033 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
bc44bd1d 4034 scsi_host_put(target->scsi_host);
19f31343 4035 }
34aa654e 4036
2d7091bc 4037 return ret;
aef9ec39
RD
4038
4039err_disconnect:
4040 srp_disconnect_target(target);
4041
b02c1536 4042free_ch:
d92c0da7
BVA
4043 for (i = 0; i < target->ch_count; i++) {
4044 ch = &target->ch[i];
4045 srp_free_ch_ib(target, ch);
4046 srp_free_req_data(target, ch);
4047 }
aef9ec39 4048
d92c0da7 4049 kfree(target->ch);
2d7091bc 4050 goto out;
aef9ec39
RD
4051}
4052
ee959b00 4053static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 4054
ee959b00
TJ
4055static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
4056 char *buf)
aef9ec39 4057{
ee959b00 4058 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 4059
6c854111 4060 return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
aef9ec39
RD
4061}
4062
ee959b00 4063static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 4064
ee959b00
TJ
4065static ssize_t show_port(struct device *dev, struct device_attribute *attr,
4066 char *buf)
aef9ec39 4067{
ee959b00 4068 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
4069
4070 return sprintf(buf, "%d\n", host->port);
4071}
4072
ee959b00 4073static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 4074
f5358a17 4075static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
4076{
4077 struct srp_host *host;
4078
4079 host = kzalloc(sizeof *host, GFP_KERNEL);
4080 if (!host)
4081 return NULL;
4082
4083 INIT_LIST_HEAD(&host->target_list);
b3589fd4 4084 spin_lock_init(&host->target_lock);
aef9ec39 4085 init_completion(&host->released);
2d7091bc 4086 mutex_init(&host->add_target_mutex);
05321937 4087 host->srp_dev = device;
aef9ec39
RD
4088 host->port = port;
4089
ee959b00 4090 host->dev.class = &srp_class;
dee2b82a 4091 host->dev.parent = device->dev->dev.parent;
6c854111
JG
4092 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
4093 port);
aef9ec39 4094
ee959b00 4095 if (device_register(&host->dev))
f5358a17 4096 goto free_host;
ee959b00 4097 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 4098 goto err_class;
ee959b00 4099 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 4100 goto err_class;
ee959b00 4101 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
4102 goto err_class;
4103
4104 return host;
4105
4106err_class:
ee959b00 4107 device_unregister(&host->dev);
aef9ec39 4108
f5358a17 4109free_host:
aef9ec39
RD
4110 kfree(host);
4111
4112 return NULL;
4113}
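
/*
 * For example (assumed device name): for an HCA called "mlx5_0" and port 1,
 * the registration above creates /sys/class/infiniband_srp/srp-mlx5_0-1/
 * with the add_target, ibdev and port attributes defined in this file.
 */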
4114
4115static void srp_add_one(struct ib_device *device)
4116{
f5358a17 4117 struct srp_device *srp_dev;
042dd765 4118 struct ib_device_attr *attr = &device->attrs;
aef9ec39 4119 struct srp_host *host;
ea1075ed
JG
4120 int mr_page_shift;
4121 unsigned int p;
52ede08f 4122 u64 max_pages_per_mr;
5f071777 4123 unsigned int flags = 0;
aef9ec39 4124
249f0656 4125 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
f5358a17 4126 if (!srp_dev)
4a061b28 4127 return;
f5358a17
RD
4128
4129 /*
4130 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
4131 * minimum of 4096 bytes. We're unlikely to build large sglists
4132 * out of smaller entries.
f5358a17 4133 */
042dd765 4134 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
52ede08f
BVA
4135 srp_dev->mr_page_size = 1 << mr_page_shift;
4136 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
042dd765 4137 max_pages_per_mr = attr->max_mr_size;
52ede08f 4138 do_div(max_pages_per_mr, srp_dev->mr_page_size);
509c5f33 4139 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
042dd765 4140 attr->max_mr_size, srp_dev->mr_page_size,
509c5f33 4141 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
52ede08f
BVA
4142 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4143 max_pages_per_mr);
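
/*
 * For example (illustrative capability values): page_size_cap = 0xfffff000
 * (4 KiB and larger pages) gives mr_page_shift = max(12, ffs(0xfffff000) - 1)
 * = 12 and mr_page_size = 4096; max_mr_size = 4 GiB then yields
 * max_pages_per_mr = 1048576 before the SRP_MAX_PAGES_PER_MR clamp above.
 */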
835ee624 4144
3023a1e9
KH
4145 srp_dev->has_fmr = (device->ops.alloc_fmr &&
4146 device->ops.dealloc_fmr &&
4147 device->ops.map_phys_fmr &&
4148 device->ops.unmap_fmr);
042dd765 4149 srp_dev->has_fr = (attr->device_cap_flags &
835ee624 4150 IB_DEVICE_MEM_MGT_EXTENSIONS);
c222a39f 4151 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
835ee624 4152 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
c222a39f 4153 } else if (!never_register &&
042dd765 4154 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
509c5f33
BVA
4155 srp_dev->use_fast_reg = (srp_dev->has_fr &&
4156 (!srp_dev->has_fmr || prefer_fr));
4157 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
4158 }
835ee624 4159
5f071777
CH
4160 if (never_register || !register_always ||
4161 (!srp_dev->has_fmr && !srp_dev->has_fr))
4162 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4163
5cfb1782
BVA
4164 if (srp_dev->use_fast_reg) {
4165 srp_dev->max_pages_per_mr =
4166 min_t(u32, srp_dev->max_pages_per_mr,
042dd765 4167 attr->max_fast_reg_page_list_len);
5cfb1782 4168 }
52ede08f
BVA
4169 srp_dev->mr_max_size = srp_dev->mr_page_size *
4170 srp_dev->max_pages_per_mr;
4a061b28 4171 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
6c854111 4172 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
042dd765 4173 attr->max_fast_reg_page_list_len,
52ede08f 4174 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
4175
4176 INIT_LIST_HEAD(&srp_dev->dev_list);
4177
4178 srp_dev->dev = device;
5f071777 4179 srp_dev->pd = ib_alloc_pd(device, flags);
f5358a17
RD
4180 if (IS_ERR(srp_dev->pd))
4181 goto free_dev;
4182
cee687b6
BVA
4183 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4184 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4185 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4186 }
f5358a17 4187
ea1075ed 4188 rdma_for_each_port (device, p) {
f5358a17 4189 host = srp_add_port(srp_dev, p);
aef9ec39 4190 if (host)
f5358a17 4191 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
4192 }
4193
f5358a17 4194 ib_set_client_data(device, &srp_client, srp_dev);
4a061b28 4195 return;
f5358a17 4196
f5358a17
RD
4197free_dev:
4198 kfree(srp_dev);
aef9ec39
RD
4199}
4200
7c1eb45a 4201static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 4202{
f5358a17 4203 struct srp_device *srp_dev;
aef9ec39 4204 struct srp_host *host, *tmp_host;
ef6c49d8 4205 struct srp_target_port *target;
aef9ec39 4206
7c1eb45a 4207 srp_dev = client_data;
1fe0cb84
DB
4208 if (!srp_dev)
4209 return;
aef9ec39 4210
f5358a17 4211 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 4212 device_unregister(&host->dev);
aef9ec39
RD
4213 /*
4214 * Wait for the sysfs entry to go away, so that no new
4215 * target ports can be created.
4216 */
4217 wait_for_completion(&host->released);
4218
4219 /*
ef6c49d8 4220 * Remove all target ports.
aef9ec39 4221 */
b3589fd4 4222 spin_lock(&host->target_lock);
ef6c49d8
BVA
4223 list_for_each_entry(target, &host->target_list, list)
4224 srp_queue_remove_work(target);
b3589fd4 4225 spin_unlock(&host->target_lock);
aef9ec39
RD
4226
4227 /*
bcc05910 4228 * Wait for tl_err and target port removal tasks.
aef9ec39 4229 */
ef6c49d8 4230 flush_workqueue(system_long_wq);
bcc05910 4231 flush_workqueue(srp_remove_wq);
aef9ec39 4232
aef9ec39
RD
4233 kfree(host);
4234 }
4235
f5358a17
RD
4236 ib_dealloc_pd(srp_dev->pd);
4237
4238 kfree(srp_dev);
aef9ec39
RD
4239}
4240
3236822b 4241static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
4242 .has_rport_state = true,
4243 .reset_timer_if_blocked = true,
a95cadb9 4244 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
4245 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4246 .dev_loss_tmo = &srp_dev_loss_tmo,
4247 .reconnect = srp_rport_reconnect,
dc1bdbd9 4248 .rport_delete = srp_rport_delete,
ed9b2264 4249 .terminate_rport_io = srp_terminate_io,
3236822b
FT
4250};
4251
aef9ec39
RD
4252static int __init srp_init_module(void)
4253{
4254 int ret;
4255
16d14e01
BVA
4256 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4257 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4258 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4259 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4260
49248644 4261 if (srp_sg_tablesize) {
e0bda7d8 4262 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
4263 if (!cmd_sg_entries)
4264 cmd_sg_entries = srp_sg_tablesize;
4265 }
4266
4267 if (!cmd_sg_entries)
4268 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4269
4270 if (cmd_sg_entries > 255) {
e0bda7d8 4271 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 4272 cmd_sg_entries = 255;
1e89a194
DD
4273 }
4274
c07d424d
DD
4275 if (!indirect_sg_entries)
4276 indirect_sg_entries = cmd_sg_entries;
4277 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
4278 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4279 cmd_sg_entries);
c07d424d
DD
4280 indirect_sg_entries = cmd_sg_entries;
4281 }
4282
0a475ef4
IR
4283 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4284 pr_warn("Clamping indirect_sg_entries to %u\n",
4285 SG_MAX_SEGMENTS);
4286 indirect_sg_entries = SG_MAX_SEGMENTS;
4287 }
4288
bcc05910 4289 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
4290 if (!srp_remove_wq) {
4291 ret = -ENOMEM;
bcc05910
BVA
4292 goto out;
4293 }
4294
4295 ret = -ENOMEM;
3236822b
FT
4296 ib_srp_transport_template =
4297 srp_attach_transport(&ib_srp_transport_functions);
4298 if (!ib_srp_transport_template)
bcc05910 4299 goto destroy_wq;
3236822b 4300
aef9ec39
RD
4301 ret = class_register(&srp_class);
4302 if (ret) {
e0bda7d8 4303 pr_err("couldn't register class infiniband_srp\n");
bcc05910 4304 goto release_tr;
aef9ec39
RD
4305 }
4306
c1a0b23b
MT
4307 ib_sa_register_client(&srp_sa_client);
4308
aef9ec39
RD
4309 ret = ib_register_client(&srp_client);
4310 if (ret) {
e0bda7d8 4311 pr_err("couldn't register IB client\n");
bcc05910 4312 goto unreg_sa;
aef9ec39
RD
4313 }
4314
bcc05910
BVA
4315out:
4316 return ret;
4317
4318unreg_sa:
4319 ib_sa_unregister_client(&srp_sa_client);
4320 class_unregister(&srp_class);
4321
4322release_tr:
4323 srp_release_transport(ib_srp_transport_template);
4324
4325destroy_wq:
4326 destroy_workqueue(srp_remove_wq);
4327 goto out;
aef9ec39
RD
4328}
4329
4330static void __exit srp_cleanup_module(void)
4331{
4332 ib_unregister_client(&srp_client);
c1a0b23b 4333 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 4334 class_unregister(&srp_class);
3236822b 4335 srp_release_transport(ib_srp_transport_template);
bcc05910 4336 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
4337}
4338
4339module_init(srp_init_module);
4340module_exit(srp_cleanup_module);