/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

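/*
 * Getter/setter callbacks for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters. A negative value is reported as "off";
 * srp_tmo_set() validates the three timeouts against each other via
 * srp_tmo_valid() before accepting a new value.
 */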
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

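/*
 * Allocate an information unit (IU) buffer of @size bytes and DMA-map it
 * for @direction transfers on the HCA associated with @host. Returns NULL
 * on allocation or mapping failure.
 */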
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}

static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				(struct sockaddr *)&target->rdma_cm.src : NULL,
				(struct sockaddr *)&target->rdma_cm.dst,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pIS to %pIS (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pIS failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it. This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}

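/*
 * Allocate the receive and send completion queues, the queue pair and the
 * FR or FMR pool for an RDMA channel, and release whatever resources the
 * channel was using before. Called both when a channel is first created
 * and when reconnecting.
 */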
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = SRP_MAX_SGE;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}

static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->ib_cm.path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}

static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}

static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}

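/*
 * Build and send an SRP_LOGIN_REQ for @ch, either as RDMA/CM private data
 * via rdma_connect() or as an IB CM REQ via ib_send_cm_req(), depending on
 * whether the target port uses the RDMA CM.
 */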
static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags	= req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(ipi,     &target->sgid.global.interface_id, 8);
		memcpy(ipi + 8, &target->initiator_ext, 8);
		memcpy(tpi,     &target->ioc_guid, 8);
		memcpy(tpi + 8, &target->id_ext, 8);
	} else {
		memcpy(ipi,     &target->initiator_ext, 8);
		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
		memcpy(tpi,     &target->id_ext, 8);
		memcpy(tpi + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(ipi, 0, 8);
		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	if (target->using_rdma_cm)
		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
	else
		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
					GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
						      sizeof(void *),
						      GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

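/*
 * Log in to the target over @ch, retrying the path lookup and the login
 * request whenever the target answers with a port or LID/QP redirect REJ.
 */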
static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			  bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, max_iu_len, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
			u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, NULL);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/* Calculate maximum initiator to target information unit length. */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
{
	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
		sizeof(struct srp_indirect_buf) +
		cmd_sg_cnt * sizeof(struct srp_direct_buf);

	if (use_imm_data)
		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
				 srp_max_imm_data);

	return max_iu_len;
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						srp_use_imm_data);
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, max_iu_len, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && target->global_rkey) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_rkey) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     target->global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, NULL);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}

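/*
 * Add a single scatterlist entry to @state for FMR mapping, splitting it
 * into mr_page_size chunks and closing out the current FMR whenever the
 * page list is full or the entry does not start on a page boundary.
 */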
8f26c9ff 1597static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1598 struct srp_rdma_ch *ch,
52bb8c62 1599 struct scatterlist *sg)
8f26c9ff 1600{
509c07bc 1601 struct srp_target_port *target = ch->target;
8f26c9ff 1602 struct srp_device *dev = target->srp_host->srp_dev;
a163afc8
BVA
1603 dma_addr_t dma_addr = sg_dma_address(sg);
1604 unsigned int dma_len = sg_dma_len(sg);
3ae95da8 1605 unsigned int len = 0;
8f26c9ff
DD
1606 int ret;
1607
3ae95da8 1608 WARN_ON_ONCE(!dma_len);
f5358a17 1609
8f26c9ff 1610 while (dma_len) {
5cfb1782 1611 unsigned offset = dma_addr & ~dev->mr_page_mask;
681cc360
BVA
1612
1613 if (state->npages == dev->max_pages_per_mr ||
1614 (state->npages > 0 && offset != 0)) {
f7f7aab1 1615 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1616 if (ret)
1617 return ret;
8f26c9ff
DD
1618 }
1619
5cfb1782 1620 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1621
8f26c9ff
DD
1622 if (!state->npages)
1623 state->base_dma_addr = dma_addr;
5cfb1782 1624 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1625 state->dma_len += len;
8f26c9ff
DD
1626 dma_addr += len;
1627 dma_len -= len;
1628 }
1629
5cfb1782 1630 /*
681cc360 1631 * If the end of the MR is not on a page boundary then we need to
8f26c9ff 1632 * close it out and start a new one -- we can only merge at page
1d3d98c4 1633 * boundaries.
8f26c9ff
DD
1634 */
1635 ret = 0;
681cc360 1636 if ((dma_addr & ~dev->mr_page_mask) != 0)
f7f7aab1 1637 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1638 return ret;
1639}
1640
26630e8a
SG
1641static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1642 struct srp_request *req, struct scatterlist *scat,
1643 int count)
76bc1e1d 1644{
76bc1e1d 1645 struct scatterlist *sg;
0e0d3a48 1646 int i, ret;
76bc1e1d 1647
26630e8a
SG
1648 state->pages = req->map_page;
1649 state->fmr.next = req->fmr_list;
509c5f33 1650 state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
26630e8a
SG
1651
1652 for_each_sg(scat, sg, count, i) {
52bb8c62 1653 ret = srp_map_sg_entry(state, ch, sg);
26630e8a
SG
1654 if (ret)
1655 return ret;
5cfb1782 1656 }
76bc1e1d 1657
f7f7aab1 1658 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1659 if (ret)
1660 return ret;
1661
26630e8a
SG
1662 return 0;
1663}
1664
1665static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1666 struct srp_request *req, struct scatterlist *scat,
1667 int count)
1668{
509c5f33
BVA
1669 unsigned int sg_offset = 0;
1670
f7f7aab1 1671 state->fr.next = req->fr_list;
509c5f33 1672 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
f7f7aab1 1673 state->sg = scat;
26630e8a 1674
3b59b7a6
BVA
1675 if (count == 0)
1676 return 0;
1677
57b0be9c 1678 while (count) {
f7f7aab1 1679 int i, n;
26630e8a 1680
509c5f33 1681 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
f7f7aab1
SG
1682 if (unlikely(n < 0))
1683 return n;
1684
57b0be9c 1685 count -= n;
f7f7aab1
SG
1686 for (i = 0; i < n; i++)
1687 state->sg = sg_next(state->sg);
1688 }
26630e8a 1689
26630e8a
SG
1690 return 0;
1691}
1692
1693static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1694 struct srp_request *req, struct scatterlist *scat,
1695 int count)
1696{
1697 struct srp_target_port *target = ch->target;
26630e8a
SG
1698 struct scatterlist *sg;
1699 int i;
1700
26630e8a 1701 for_each_sg(scat, sg, count, i) {
a163afc8 1702 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
cee687b6 1703 target->global_rkey);
0e0d3a48 1704 }
76bc1e1d 1705
26630e8a 1706 return 0;
76bc1e1d
BVA
1707}
1708
330179f2
BVA
1709/*
1710 * Register the indirect data buffer descriptor with the HCA.
1711 *
1712 * Note: since the indirect data buffer descriptor has been allocated with
1713 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1714 * memory buffer.
1715 */
1716static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1717 void **next_mr, void **end_mr, u32 idb_len,
1718 __be32 *idb_rkey)
1719{
1720 struct srp_target_port *target = ch->target;
1721 struct srp_device *dev = target->srp_host->srp_dev;
1722 struct srp_map_state state;
1723 struct srp_direct_buf idb_desc;
1724 u64 idb_pages[1];
f7f7aab1 1725 struct scatterlist idb_sg[1];
330179f2
BVA
1726 int ret;
1727
1728 memset(&state, 0, sizeof(state));
1729 memset(&idb_desc, 0, sizeof(idb_desc));
1730 state.gen.next = next_mr;
1731 state.gen.end = end_mr;
1732 state.desc = &idb_desc;
330179f2
BVA
1733 state.base_dma_addr = req->indirect_dma_addr;
1734 state.dma_len = idb_len;
f7f7aab1
SG
1735
1736 if (dev->use_fast_reg) {
1737 state.sg = idb_sg;
54f5c9c5 1738 sg_init_one(idb_sg, req->indirect_desc, idb_len);
f7f7aab1 1739 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
fc925518
CH
1740#ifdef CONFIG_NEED_SG_DMA_LENGTH
1741 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1742#endif
509c5f33 1743 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
f7f7aab1
SG
1744 if (ret < 0)
1745 return ret;
509c5f33 1746 WARN_ON_ONCE(ret < 1);
f7f7aab1
SG
1747 } else if (dev->use_fmr) {
1748 state.pages = idb_pages;
1749 state.pages[0] = (req->indirect_dma_addr &
1750 dev->mr_page_mask);
1751 state.npages = 1;
1752 ret = srp_map_finish_fmr(&state, ch);
1753 if (ret < 0)
1754 return ret;
1755 } else {
1756 return -EINVAL;
1757 }
330179f2
BVA
1758
1759 *idb_rkey = idb_desc.key;
1760
f7f7aab1 1761 return 0;
330179f2
BVA
1762}
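
/*
 * srp_map_idb() is only needed when the target does not accept the global
 * rkey: in that case the indirect descriptor table itself must be
 * registered so that its rkey can be reported to the target in the table
 * descriptor (see the !target->global_rkey branch in srp_map_data() below).
 */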
1763
509c5f33
BVA
1764static void srp_check_mapping(struct srp_map_state *state,
1765 struct srp_rdma_ch *ch, struct srp_request *req,
1766 struct scatterlist *scat, int count)
1767{
1768 struct srp_device *dev = ch->target->srp_host->srp_dev;
1769 struct srp_fr_desc **pfr;
1770 u64 desc_len = 0, mr_len = 0;
1771 int i;
1772
1773 for (i = 0; i < state->ndesc; i++)
1774 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1775 if (dev->use_fast_reg)
1776 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1777 mr_len += (*pfr)->mr->length;
1778 else if (dev->use_fmr)
1779 for (i = 0; i < state->nmdesc; i++)
1780 mr_len += be32_to_cpu(req->indirect_desc[i].len);
1781 if (desc_len != scsi_bufflen(req->scmnd) ||
1782 mr_len > scsi_bufflen(req->scmnd))
1783 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1784 scsi_bufflen(req->scmnd), desc_len, mr_len,
1785 state->ndesc, state->nmdesc);
1786}
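
/*
 * srp_check_mapping() is a consistency check that only runs when the
 * corresponding dynamic debug statement in srp_map_data() is enabled.  It
 * verifies that the indirect descriptors describe exactly scsi_bufflen()
 * bytes and that the memory regions do not cover more than that, and logs
 * an error otherwise.
 */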
509c5f33 1787
77269cdf
BVA
1788/**
1789 * srp_map_data() - map SCSI data buffer onto an SRP request
1790 * @scmnd: SCSI command to map
1791 * @ch: SRP RDMA channel
1792 * @req: SRP request
1793 *
1794 * Returns the length in bytes of the SRP_CMD IU or a negative value if
882981f4
BVA
1795 * mapping failed. The size of any immediate data is not included in the
1796 * return value.
77269cdf 1797 */
509c07bc 1798static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1799 struct srp_request *req)
1800{
509c07bc 1801 struct srp_target_port *target = ch->target;
882981f4 1802 struct scatterlist *scat, *sg;
aef9ec39 1803 struct srp_cmd *cmd = req->cmd->buf;
882981f4 1804 int i, len, nents, count, ret;
85507bcc
RC
1805 struct srp_device *dev;
1806 struct ib_device *ibdev;
8f26c9ff
DD
1807 struct srp_map_state state;
1808 struct srp_indirect_buf *indirect_hdr;
882981f4 1809 u64 data_len;
330179f2
BVA
1810 u32 idb_len, table_len;
1811 __be32 idb_rkey;
8f26c9ff 1812 u8 fmt;
aef9ec39 1813
882981f4
BVA
1814 req->cmd->num_sge = 1;
1815
bb350d1d 1816 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
482fffc4 1817 return sizeof(struct srp_cmd) + cmd->add_cdb_len;
aef9ec39
RD
1818
1819 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1820 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1821 shost_printk(KERN_WARNING, target->scsi_host,
1822 PFX "Unhandled data direction %d\n",
1823 scmnd->sc_data_direction);
aef9ec39
RD
1824 return -EINVAL;
1825 }
1826
bb350d1d
FT
1827 nents = scsi_sg_count(scmnd);
1828 scat = scsi_sglist(scmnd);
882981f4 1829 data_len = scsi_bufflen(scmnd);
aef9ec39 1830
05321937 1831 dev = target->srp_host->srp_dev;
85507bcc
RC
1832 ibdev = dev->dev;
1833
1834 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1835 if (unlikely(count == 0))
1836 return -EIO;
f5358a17 1837
882981f4
BVA
1838 if (ch->use_imm_data &&
1839 count <= SRP_MAX_IMM_SGE &&
1840 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1841 scmnd->sc_data_direction == DMA_TO_DEVICE) {
1842 struct srp_imm_buf *buf;
1843 struct ib_sge *sge = &req->cmd->sge[1];
1844
1845 fmt = SRP_DATA_DESC_IMM;
1846 len = SRP_IMM_DATA_OFFSET;
1847 req->nmdesc = 0;
1848 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1849 buf->len = cpu_to_be32(data_len);
1850 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1851 for_each_sg(scat, sg, count, i) {
a163afc8
BVA
1852 sge[i].addr = sg_dma_address(sg);
1853 sge[i].length = sg_dma_len(sg);
882981f4
BVA
1854 sge[i].lkey = target->lkey;
1855 }
1856 req->cmd->num_sge += count;
1857 goto map_complete;
1858 }
1859
f5358a17 1860 fmt = SRP_DATA_DESC_DIRECT;
482fffc4
BVA
1861 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1862 sizeof(struct srp_direct_buf);
aef9ec39 1863
cee687b6 1864 if (count == 1 && target->global_rkey) {
f5358a17
RD
1865 /*
1866 * The midlayer only generated a single gather/scatter
1867 * entry, or DMA mapping coalesced everything to a
1868 * single entry. So a direct descriptor along with
1869 * the DMA MR suffices.
1870 */
482fffc4 1871 struct srp_direct_buf *buf;
aef9ec39 1872
482fffc4 1873 buf = (void *)cmd->add_data + cmd->add_cdb_len;
a163afc8 1874 buf->va = cpu_to_be64(sg_dma_address(scat));
cee687b6 1875 buf->key = cpu_to_be32(target->global_rkey);
a163afc8 1876 buf->len = cpu_to_be32(sg_dma_len(scat));
8f26c9ff 1877
52ede08f 1878 req->nmdesc = 0;
8f26c9ff
DD
1879 goto map_complete;
1880 }
1881
5cfb1782
BVA
1882 /*
1883 * We have more than one scatter/gather entry, so build our indirect
1884 * descriptor table, trying to merge as many entries as we can.
8f26c9ff 1885 */
482fffc4 1886 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
8f26c9ff 1887
c07d424d
DD
1888 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1889 target->indirect_size, DMA_TO_DEVICE);
1890
8f26c9ff 1891 memset(&state, 0, sizeof(state));
9edba790 1892 state.desc = req->indirect_desc;
26630e8a 1893 if (dev->use_fast_reg)
e012f363 1894 ret = srp_map_sg_fr(&state, ch, req, scat, count);
26630e8a 1895 else if (dev->use_fmr)
e012f363 1896 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
26630e8a 1897 else
e012f363
BVA
1898 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1899 req->nmdesc = state.nmdesc;
1900 if (ret < 0)
1901 goto unmap;
cf368713 1902
509c5f33
BVA
1903 {
1904 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1905 "Memory mapping consistency check");
1a1faf7a 1906 if (DYNAMIC_DEBUG_BRANCH(ddm))
509c5f33
BVA
1907 srp_check_mapping(&state, ch, req, scat, count);
1908 }
cf368713 1909
c07d424d
DD
 1910 /* We've mapped the request; now pull as much of the indirect
1911 * descriptor table as we can into the command buffer. If this
1912 * target is not using an external indirect table, we are
1913 * guaranteed to fit into the command, as the SCSI layer won't
1914 * give us more S/G entries than we allow.
8f26c9ff 1915 */
8f26c9ff 1916 if (state.ndesc == 1) {
5cfb1782
BVA
1917 /*
1918 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1919 * so use a direct descriptor.
1920 */
482fffc4 1921 struct srp_direct_buf *buf;
cf368713 1922
482fffc4 1923 buf = (void *)cmd->add_data + cmd->add_cdb_len;
c07d424d 1924 *buf = req->indirect_desc[0];
8f26c9ff 1925 goto map_complete;
aef9ec39
RD
1926 }
1927
c07d424d
DD
1928 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1929 !target->allow_ext_sg)) {
1930 shost_printk(KERN_ERR, target->scsi_host,
1931 "Could not fit S/G list into SRP_CMD\n");
e012f363
BVA
1932 ret = -EIO;
1933 goto unmap;
c07d424d
DD
1934 }
1935
1936 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1937 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1938 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1939
1940 fmt = SRP_DATA_DESC_INDIRECT;
482fffc4
BVA
1941 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1942 sizeof(struct srp_indirect_buf);
c07d424d 1943 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1944
c07d424d
DD
1945 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1946 count * sizeof (struct srp_direct_buf));
8f26c9ff 1947
cee687b6 1948 if (!target->global_rkey) {
330179f2
BVA
1949 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1950 idb_len, &idb_rkey);
1951 if (ret < 0)
e012f363 1952 goto unmap;
330179f2
BVA
1953 req->nmdesc++;
1954 } else {
cee687b6 1955 idb_rkey = cpu_to_be32(target->global_rkey);
330179f2
BVA
1956 }
1957
c07d424d 1958 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1959 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1960 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1961 indirect_hdr->len = cpu_to_be32(state.total_len);
1962
1963 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1964 cmd->data_out_desc_cnt = count;
8f26c9ff 1965 else
c07d424d
DD
1966 cmd->data_in_desc_cnt = count;
1967
1968 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1969 DMA_TO_DEVICE);
8f26c9ff
DD
1970
1971map_complete:
aef9ec39
RD
1972 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1973 cmd->buf_fmt = fmt << 4;
1974 else
1975 cmd->buf_fmt = fmt;
1976
aef9ec39 1977 return len;
e012f363
BVA
1978
1979unmap:
1980 srp_unmap_data(scmnd, ch, req);
ffc548bb
BVA
1981 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1982 ret = -E2BIG;
e012f363 1983 return ret;
aef9ec39
RD
1984}
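
/*
 * Overview of the descriptor formats chosen by srp_map_data():
 *  - SRP_DATA_DESC_IMM: small writes are sent as immediate data inside the
 *    SRP_CMD information unit; the payload is gathered through additional
 *    send SGEs instead of being registered.  Only used if the target
 *    advertised immediate data support during login.
 *  - SRP_DATA_DESC_DIRECT: a single srp_direct_buf describes the buffer,
 *    either because DMA mapping produced one entry and the global rkey is
 *    usable or because memory registration collapsed the scatterlist into
 *    a single descriptor.
 *  - SRP_DATA_DESC_INDIRECT: the remaining cases; as many descriptors as
 *    the command allows are copied into the IU and, if the target does not
 *    accept the global rkey, the descriptor table itself is registered
 *    through srp_map_idb().
 */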
1985
76c75b25
BVA
1986/*
1987 * Return an IU and possible credit to the free pool
1988 */
509c07bc 1989static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1990 enum srp_iu_type iu_type)
1991{
1992 unsigned long flags;
1993
509c07bc
BVA
1994 spin_lock_irqsave(&ch->lock, flags);
1995 list_add(&iu->list, &ch->free_tx);
76c75b25 1996 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1997 ++ch->req_lim;
1998 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1999}
2000
05a1d750 2001/*
509c07bc 2002 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 2003 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
2004 *
2005 * Note:
2006 * An upper limit for the number of allocated information units for each
2007 * request type is:
2008 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
2009 * more than Scsi_Host.can_queue requests.
2010 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
2011 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
2012 * one unanswered SRP request to an initiator.
2013 */
509c07bc 2014static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
2015 enum srp_iu_type iu_type)
2016{
509c07bc 2017 struct srp_target_port *target = ch->target;
05a1d750
DD
2018 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
2019 struct srp_iu *iu;
2020
93c76dbb
BVA
2021 lockdep_assert_held(&ch->lock);
2022
1dc7b1f1 2023 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 2024
509c07bc 2025 if (list_empty(&ch->free_tx))
05a1d750
DD
2026 return NULL;
2027
2028 /* Initiator responses to target requests do not consume credits */
76c75b25 2029 if (iu_type != SRP_IU_RSP) {
509c07bc 2030 if (ch->req_lim <= rsv) {
76c75b25
BVA
2031 ++target->zero_req_lim;
2032 return NULL;
2033 }
2034
509c07bc 2035 --ch->req_lim;
05a1d750
DD
2036 }
2037
509c07bc 2038 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 2039 list_del(&iu->list);
05a1d750
DD
2040 return iu;
2041}
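
/*
 * Credit accounting in __srp_get_tx_iu(): req_lim is the number of
 * requests the target is currently willing to accept.  SRP_TSK_MGMT_SQ_SIZE
 * credits are kept in reserve for task management functions, so an
 * SRP_IU_CMD allocation fails once req_lim drops to that reserve, while an
 * SRP_IU_TSK_MGMT allocation may dip into the reserve and an SRP_IU_RSP
 * does not consume a credit at all.
 */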
2042
9294000d
BVA
2043/*
2044 * Note: if this function is called from inside ib_drain_sq() then it will
2045 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
2046 * with status IB_WC_SUCCESS then that's a bug.
2047 */
1dc7b1f1
CH
2048static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
2049{
2050 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2051 struct srp_rdma_ch *ch = cq->cq_context;
2052
2053 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2054 srp_handle_qp_err(cq, wc, "SEND");
2055 return;
2056 }
2057
93c76dbb
BVA
2058 lockdep_assert_held(&ch->lock);
2059
1dc7b1f1
CH
2060 list_add(&iu->list, &ch->free_tx);
2061}
2062
882981f4
BVA
2063/**
2064 * srp_post_send() - send an SRP information unit
2065 * @ch: RDMA channel over which to send the information unit.
2066 * @iu: Information unit to send.
2067 * @len: Length of the information unit excluding immediate data.
2068 */
509c07bc 2069static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 2070{
509c07bc 2071 struct srp_target_port *target = ch->target;
71347b0c 2072 struct ib_send_wr wr;
05a1d750 2073
882981f4
BVA
2074 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
2075 return -EINVAL;
2076
2077 iu->sge[0].addr = iu->dma;
2078 iu->sge[0].length = len;
2079 iu->sge[0].lkey = target->lkey;
05a1d750 2080
1dc7b1f1
CH
2081 iu->cqe.done = srp_send_done;
2082
05a1d750 2083 wr.next = NULL;
1dc7b1f1 2084 wr.wr_cqe = &iu->cqe;
882981f4
BVA
2085 wr.sg_list = &iu->sge[0];
2086 wr.num_sge = iu->num_sge;
05a1d750
DD
2087 wr.opcode = IB_WR_SEND;
2088 wr.send_flags = IB_SEND_SIGNALED;
2089
71347b0c 2090 return ib_post_send(ch->qp, &wr, NULL);
05a1d750
DD
2091}
2092
509c07bc 2093static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 2094{
509c07bc 2095 struct srp_target_port *target = ch->target;
71347b0c 2096 struct ib_recv_wr wr;
dcb4cb85 2097 struct ib_sge list;
c996bb47
BVA
2098
2099 list.addr = iu->dma;
2100 list.length = iu->size;
9af76271 2101 list.lkey = target->lkey;
c996bb47 2102
1dc7b1f1
CH
2103 iu->cqe.done = srp_recv_done;
2104
c996bb47 2105 wr.next = NULL;
1dc7b1f1 2106 wr.wr_cqe = &iu->cqe;
c996bb47
BVA
2107 wr.sg_list = &list;
2108 wr.num_sge = 1;
2109
71347b0c 2110 return ib_post_recv(ch->qp, &wr, NULL);
c996bb47
BVA
2111}
2112
509c07bc 2113static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 2114{
509c07bc 2115 struct srp_target_port *target = ch->target;
aef9ec39
RD
2116 struct srp_request *req;
2117 struct scsi_cmnd *scmnd;
2118 unsigned long flags;
aef9ec39 2119
aef9ec39 2120 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
2121 spin_lock_irqsave(&ch->lock, flags);
2122 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
0a6fdbde
BVA
2123 if (rsp->tag == ch->tsk_mgmt_tag) {
2124 ch->tsk_mgmt_status = -1;
2125 if (be32_to_cpu(rsp->resp_data_len) >= 4)
2126 ch->tsk_mgmt_status = rsp->data[3];
2127 complete(&ch->tsk_mgmt_done);
2128 } else {
2129 shost_printk(KERN_ERR, target->scsi_host,
2130 "Received tsk mgmt response too late for tag %#llx\n",
2131 rsp->tag);
2132 }
509c07bc 2133 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2134 } else {
77f2c1a4 2135 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
6cb72bc1 2136 if (scmnd && scmnd->host_scribble) {
77f2c1a4
BVA
2137 req = (void *)scmnd->host_scribble;
2138 scmnd = srp_claim_req(ch, req, NULL, scmnd);
6cb72bc1
BVA
2139 } else {
2140 scmnd = NULL;
77f2c1a4 2141 }
22032991 2142 if (!scmnd) {
7aa54bd7 2143 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
2144 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
2145 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 2146
509c07bc
BVA
2147 spin_lock_irqsave(&ch->lock, flags);
2148 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
2149 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
2150
2151 return;
2152 }
aef9ec39
RD
2153 scmnd->result = rsp->status;
2154
2155 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
2156 memcpy(scmnd->sense_buffer, rsp->data +
2157 be32_to_cpu(rsp->resp_data_len),
2158 min_t(int, be32_to_cpu(rsp->sense_data_len),
2159 SCSI_SENSE_BUFFERSIZE));
2160 }
2161
e714531a 2162 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 2163 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
2164 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
2165 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
2166 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
2167 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
2168 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
2169 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 2170
509c07bc 2171 srp_free_req(ch, req, scmnd,
22032991
BVA
2172 be32_to_cpu(rsp->req_lim_delta));
2173
f8b6e31e
DD
2174 scmnd->host_scribble = NULL;
2175 scmnd->scsi_done(scmnd);
aef9ec39 2176 }
aef9ec39
RD
2177}
2178
509c07bc 2179static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
2180 void *rsp, int len)
2181{
509c07bc 2182 struct srp_target_port *target = ch->target;
76c75b25 2183 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
2184 unsigned long flags;
2185 struct srp_iu *iu;
76c75b25 2186 int err;
bb12588a 2187
509c07bc
BVA
2188 spin_lock_irqsave(&ch->lock, flags);
2189 ch->req_lim += req_delta;
2190 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2191 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 2192
bb12588a
DD
2193 if (!iu) {
2194 shost_printk(KERN_ERR, target->scsi_host, PFX
2195 "no IU available to send response\n");
76c75b25 2196 return 1;
bb12588a
DD
2197 }
2198
882981f4 2199 iu->num_sge = 1;
bb12588a
DD
2200 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2201 memcpy(iu->buf, rsp, len);
2202 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2203
509c07bc 2204 err = srp_post_send(ch, iu, len);
76c75b25 2205 if (err) {
bb12588a
DD
2206 shost_printk(KERN_ERR, target->scsi_host, PFX
2207 "unable to post response: %d\n", err);
509c07bc 2208 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 2209 }
bb12588a 2210
bb12588a
DD
2211 return err;
2212}
2213
509c07bc 2214static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
2215 struct srp_cred_req *req)
2216{
2217 struct srp_cred_rsp rsp = {
2218 .opcode = SRP_CRED_RSP,
2219 .tag = req->tag,
2220 };
2221 s32 delta = be32_to_cpu(req->req_lim_delta);
2222
509c07bc
BVA
2223 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2224 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
2225 "problems processing SRP_CRED_REQ\n");
2226}
2227
509c07bc 2228static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
2229 struct srp_aer_req *req)
2230{
509c07bc 2231 struct srp_target_port *target = ch->target;
bb12588a
DD
2232 struct srp_aer_rsp rsp = {
2233 .opcode = SRP_AER_RSP,
2234 .tag = req->tag,
2235 };
2236 s32 delta = be32_to_cpu(req->req_lim_delta);
2237
2238 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 2239 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 2240
509c07bc 2241 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
2242 shost_printk(KERN_ERR, target->scsi_host, PFX
2243 "problems processing SRP_AER_REQ\n");
2244}
2245
1dc7b1f1 2246static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 2247{
1dc7b1f1
CH
2248 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2249 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 2250 struct srp_target_port *target = ch->target;
dcb4cb85 2251 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 2252 int res;
aef9ec39
RD
2253 u8 opcode;
2254
1dc7b1f1
CH
2255 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2256 srp_handle_qp_err(cq, wc, "RECV");
2257 return;
2258 }
2259
509c07bc 2260 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2261 DMA_FROM_DEVICE);
aef9ec39
RD
2262
2263 opcode = *(u8 *) iu->buf;
2264
2265 if (0) {
7aa54bd7
DD
2266 shost_printk(KERN_ERR, target->scsi_host,
2267 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
2268 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2269 iu->buf, wc->byte_len, true);
aef9ec39
RD
2270 }
2271
2272 switch (opcode) {
2273 case SRP_RSP:
509c07bc 2274 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
2275 break;
2276
bb12588a 2277 case SRP_CRED_REQ:
509c07bc 2278 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
2279 break;
2280
2281 case SRP_AER_REQ:
509c07bc 2282 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
2283 break;
2284
aef9ec39
RD
2285 case SRP_T_LOGOUT:
2286 /* XXX Handle target logout */
7aa54bd7
DD
2287 shost_printk(KERN_WARNING, target->scsi_host,
2288 PFX "Got target logout request\n");
aef9ec39
RD
2289 break;
2290
2291 default:
7aa54bd7
DD
2292 shost_printk(KERN_WARNING, target->scsi_host,
2293 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
2294 break;
2295 }
2296
509c07bc 2297 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2298 DMA_FROM_DEVICE);
c996bb47 2299
509c07bc 2300 res = srp_post_recv(ch, iu);
c996bb47
BVA
2301 if (res != 0)
2302 shost_printk(KERN_ERR, target->scsi_host,
2303 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
2304}
2305
c1120f89
BVA
2306/**
2307 * srp_tl_err_work() - handle a transport layer error
af24663b 2308 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
2309 *
2310 * Note: This function may get invoked before the rport has been created,
2311 * hence the target->rport test.
2312 */
2313static void srp_tl_err_work(struct work_struct *work)
2314{
2315 struct srp_target_port *target;
2316
2317 target = container_of(work, struct srp_target_port, tl_err_work);
2318 if (target->rport)
2319 srp_start_tl_fail_timers(target->rport);
2320}
2321
1dc7b1f1
CH
2322static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2323 const char *opname)
948d1e88 2324{
1dc7b1f1 2325 struct srp_rdma_ch *ch = cq->cq_context;
7dad6b2e
BVA
2326 struct srp_target_port *target = ch->target;
2327
c014c8cd 2328 if (ch->connected && !target->qp_in_error) {
1dc7b1f1
CH
2329 shost_printk(KERN_ERR, target->scsi_host,
2330 PFX "failed %s status %s (%d) for CQE %p\n",
2331 opname, ib_wc_status_msg(wc->status), wc->status,
2332 wc->wr_cqe);
c1120f89 2333 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2334 }
948d1e88
BVA
2335 target->qp_in_error = true;
2336}
2337
76c75b25 2338static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2339{
76c75b25 2340 struct srp_target_port *target = host_to_target(shost);
509c07bc 2341 struct srp_rdma_ch *ch;
aef9ec39
RD
2342 struct srp_request *req;
2343 struct srp_iu *iu;
2344 struct srp_cmd *cmd;
85507bcc 2345 struct ib_device *dev;
76c75b25 2346 unsigned long flags;
77f2c1a4
BVA
2347 u32 tag;
2348 u16 idx;
d1b4289e 2349 int len, ret;
aef9ec39 2350
d1b4289e
BVA
2351 scmnd->result = srp_chkready(target->rport);
2352 if (unlikely(scmnd->result))
2353 goto err;
2ce19e72 2354
77f2c1a4
BVA
2355 WARN_ON_ONCE(scmnd->request->tag < 0);
2356 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2357 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2358 idx = blk_mq_unique_tag_to_tag(tag);
2359 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2360 dev_name(&shost->shost_gendev), tag, idx,
2361 target->req_ring_size);
509c07bc
BVA
2362
2363 spin_lock_irqsave(&ch->lock, flags);
2364 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2365 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2366
77f2c1a4
BVA
2367 if (!iu)
2368 goto err;
2369
2370 req = &ch->req_ring[idx];
05321937 2371 dev = target->srp_host->srp_dev->dev;
513d5647 2372 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
85507bcc 2373 DMA_TO_DEVICE);
aef9ec39 2374
f8b6e31e 2375 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2376
2377 cmd = iu->buf;
2378 memset(cmd, 0, sizeof *cmd);
2379
2380 cmd->opcode = SRP_CMD;
985aa495 2381 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2382 cmd->tag = tag;
aef9ec39 2383 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
482fffc4
BVA
2384 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2385 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2386 4);
2387 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2388 goto err_iu;
2389 }
aef9ec39 2390
aef9ec39
RD
2391 req->scmnd = scmnd;
2392 req->cmd = iu;
aef9ec39 2393
509c07bc 2394 len = srp_map_data(scmnd, ch, req);
aef9ec39 2395 if (len < 0) {
7aa54bd7 2396 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2397 PFX "Failed to map data (%d)\n", len);
2398 /*
2399 * If we ran out of memory descriptors (-ENOMEM) because an
2400 * application is queuing many requests with more than
52ede08f 2401 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2402 * to reduce queue depth temporarily.
2403 */
2404 scmnd->result = len == -ENOMEM ?
2405 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2406 goto err_iu;
aef9ec39
RD
2407 }
2408
513d5647 2409 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
85507bcc 2410 DMA_TO_DEVICE);
aef9ec39 2411
509c07bc 2412 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2413 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2ee00f6a 2414 scmnd->result = DID_ERROR << 16;
aef9ec39
RD
2415 goto err_unmap;
2416 }
2417
fd561412 2418 return 0;
aef9ec39
RD
2419
2420err_unmap:
509c07bc 2421 srp_unmap_data(scmnd, ch, req);
aef9ec39 2422
76c75b25 2423err_iu:
509c07bc 2424 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2425
024ca901
BVA
2426 /*
 2427 * Ensure that the loops that iterate over the request ring cannot
 2428 * encounter a dangling SCSI command pointer.
2429 */
2430 req->scmnd = NULL;
2431
d1b4289e
BVA
2432err:
2433 if (scmnd->result) {
2434 scmnd->scsi_done(scmnd);
2435 ret = 0;
2436 } else {
2437 ret = SCSI_MLQUEUE_HOST_BUSY;
2438 }
a95cadb9 2439
fd561412 2440 return ret;
aef9ec39
RD
2441}
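
/*
 * srp_queuecommand() flow: check the rport state, pick the RDMA channel
 * from the block layer tag, take a TX IU and a credit under ch->lock, map
 * the data buffer with srp_map_data() and post the SRP_CMD IU.  A mapping
 * failure caused by running out of memory registration descriptors
 * (-ENOMEM) is reported to the SCSI mid-layer as QUEUE_FULL so that the
 * queue depth is reduced temporarily; other failures complete the command
 * with DID_ERROR.
 */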
2442
4d73f95f
BVA
2443/*
2444 * Note: the resources allocated in this function are freed in
509c07bc 2445 * srp_free_ch_ib().
4d73f95f 2446 */
509c07bc 2447static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2448{
509c07bc 2449 struct srp_target_port *target = ch->target;
aef9ec39
RD
2450 int i;
2451
509c07bc
BVA
2452 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2453 GFP_KERNEL);
2454 if (!ch->rx_ring)
4d73f95f 2455 goto err_no_ring;
509c07bc
BVA
2456 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2457 GFP_KERNEL);
2458 if (!ch->tx_ring)
4d73f95f
BVA
2459 goto err_no_ring;
2460
2461 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2462 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2463 ch->max_ti_iu_len,
2464 GFP_KERNEL, DMA_FROM_DEVICE);
2465 if (!ch->rx_ring[i])
aef9ec39
RD
2466 goto err;
2467 }
2468
4d73f95f 2469 for (i = 0; i < target->queue_size; ++i) {
509c07bc 2470 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
513d5647 2471 ch->max_it_iu_len,
509c07bc
BVA
2472 GFP_KERNEL, DMA_TO_DEVICE);
2473 if (!ch->tx_ring[i])
aef9ec39 2474 goto err;
dcb4cb85 2475
509c07bc 2476 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2477 }
2478
2479 return 0;
2480
2481err:
4d73f95f 2482 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2483 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2484 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2485 }
2486
4d73f95f
BVA
2487
2488err_no_ring:
509c07bc
BVA
2489 kfree(ch->tx_ring);
2490 ch->tx_ring = NULL;
2491 kfree(ch->rx_ring);
2492 ch->rx_ring = NULL;
4d73f95f 2493
aef9ec39
RD
2494 return -ENOMEM;
2495}
2496
c9b03c1a
BVA
2497static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2498{
2499 uint64_t T_tr_ns, max_compl_time_ms;
2500 uint32_t rq_tmo_jiffies;
2501
2502 /*
2503 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2504 * table 91), both the QP timeout and the retry count have to be set
2505 * for RC QP's during the RTR to RTS transition.
2506 */
2507 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2508 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2509
2510 /*
2511 * Set target->rq_tmo_jiffies to one second more than the largest time
2512 * it can take before an error completion is generated. See also
2513 * C9-140..142 in the IBTA spec for more information about how to
2514 * convert the QP Local ACK Timeout value to nanoseconds.
2515 */
2516 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2517 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2518 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2519 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2520
2521 return rq_tmo_jiffies;
2522}
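
/*
 * Worked example for the computation above (illustrative values): with a
 * QP Local ACK Timeout of 14, T_tr = 4096 * 2^14 ns ~= 67 ms.  With
 * retry_cnt = 7 the longest time before an error completion is generated
 * is 7 * 4 * 67 ms ~= 1879 ms, so rq_tmo_jiffies corresponds to roughly
 * 2879 ms.
 */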
2523
961e0be8 2524static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2525 const struct srp_login_rsp *lrsp,
509c07bc 2526 struct srp_rdma_ch *ch)
961e0be8 2527{
509c07bc 2528 struct srp_target_port *target = ch->target;
961e0be8
DD
2529 struct ib_qp_attr *qp_attr = NULL;
2530 int attr_mask = 0;
19f31343 2531 int ret = 0;
961e0be8
DD
2532 int i;
2533
2534 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2535 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2536 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
882981f4
BVA
2537 ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
2538 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2539 ch->use_imm_data);
513d5647
BVA
2540 WARN_ON_ONCE(ch->max_it_iu_len >
2541 be32_to_cpu(lrsp->max_it_iu_len));
961e0be8 2542
882981f4
BVA
2543 if (ch->use_imm_data)
2544 shost_printk(KERN_DEBUG, target->scsi_host,
2545 PFX "using immediate data\n");
961e0be8
DD
2546
2547 /*
2548 * Reserve credits for task management so we don't
2549 * bounce requests back to the SCSI mid-layer.
2550 */
2551 target->scsi_host->can_queue
509c07bc 2552 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2553 target->scsi_host->can_queue);
4d73f95f
BVA
2554 target->scsi_host->cmd_per_lun
2555 = min_t(int, target->scsi_host->can_queue,
2556 target->scsi_host->cmd_per_lun);
961e0be8
DD
2557 } else {
2558 shost_printk(KERN_WARNING, target->scsi_host,
2559 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2560 ret = -ECONNRESET;
2561 goto error;
2562 }
2563
509c07bc
BVA
2564 if (!ch->rx_ring) {
2565 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2566 if (ret)
2567 goto error;
2568 }
2569
4d73f95f 2570 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2571 struct srp_iu *iu = ch->rx_ring[i];
2572
2573 ret = srp_post_recv(ch, iu);
961e0be8 2574 if (ret)
19f31343 2575 goto error;
961e0be8
DD
2576 }
2577
19f31343
BVA
2578 if (!target->using_rdma_cm) {
2579 ret = -ENOMEM;
2580 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2581 if (!qp_attr)
2582 goto error;
2583
2584 qp_attr->qp_state = IB_QPS_RTR;
2585 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2586 if (ret)
2587 goto error_free;
2588
2589 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2590 if (ret)
2591 goto error_free;
961e0be8 2592
19f31343
BVA
2593 qp_attr->qp_state = IB_QPS_RTS;
2594 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2595 if (ret)
2596 goto error_free;
c9b03c1a 2597
19f31343 2598 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
961e0be8 2599
19f31343
BVA
2600 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2601 if (ret)
2602 goto error_free;
2603
2604 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2605 }
961e0be8
DD
2606
2607error_free:
2608 kfree(qp_attr);
2609
2610error:
509c07bc 2611 ch->status = ret;
961e0be8
DD
2612}
2613
19f31343 2614static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
e7ff98ae 2615 const struct ib_cm_event *event,
19f31343 2616 struct srp_rdma_ch *ch)
aef9ec39 2617{
509c07bc 2618 struct srp_target_port *target = ch->target;
7aa54bd7 2619 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2620 struct ib_class_port_info *cpi;
2621 int opcode;
19f31343 2622 u16 dlid;
aef9ec39
RD
2623
2624 switch (event->param.rej_rcvd.reason) {
2625 case IB_CM_REJ_PORT_CM_REDIRECT:
2626 cpi = event->param.rej_rcvd.ari;
19f31343
BVA
2627 dlid = be16_to_cpu(cpi->redirect_lid);
2628 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2629 ch->ib_cm.path.pkey = cpi->redirect_pkey;
aef9ec39 2630 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
19f31343 2631 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2632
19f31343 2633 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
aef9ec39
RD
2634 break;
2635
2636 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2637 if (srp_target_is_topspin(target)) {
19f31343
BVA
2638 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2639
aef9ec39
RD
2640 /*
2641 * Topspin/Cisco SRP gateways incorrectly send
2642 * reject reason code 25 when they mean 24
2643 * (port redirect).
2644 */
19f31343 2645 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
aef9ec39 2646
7aa54bd7
DD
2647 shost_printk(KERN_DEBUG, shost,
2648 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
19f31343
BVA
2649 be64_to_cpu(dgid->global.subnet_prefix),
2650 be64_to_cpu(dgid->global.interface_id));
aef9ec39 2651
509c07bc 2652 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2653 } else {
7aa54bd7
DD
2654 shost_printk(KERN_WARNING, shost,
2655 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2656 ch->status = -ECONNRESET;
aef9ec39
RD
2657 }
2658 break;
2659
2660 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2661 shost_printk(KERN_WARNING, shost,
2662 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2663 ch->status = -ECONNRESET;
aef9ec39
RD
2664 break;
2665
2666 case IB_CM_REJ_CONSUMER_DEFINED:
2667 opcode = *(u8 *) event->private_data;
2668 if (opcode == SRP_LOGIN_REJ) {
2669 struct srp_login_rej *rej = event->private_data;
2670 u32 reason = be32_to_cpu(rej->reason);
2671
2672 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2673 shost_printk(KERN_WARNING, shost,
2674 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2675 else
e7ffde01
BVA
2676 shost_printk(KERN_WARNING, shost, PFX
2677 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000 2678 target->sgid.raw,
19f31343
BVA
2679 target->ib_cm.orig_dgid.raw,
2680 reason);
aef9ec39 2681 } else
7aa54bd7
DD
2682 shost_printk(KERN_WARNING, shost,
2683 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2684 " opcode 0x%02x\n", opcode);
509c07bc 2685 ch->status = -ECONNRESET;
aef9ec39
RD
2686 break;
2687
9fe4bcf4
DD
2688 case IB_CM_REJ_STALE_CONN:
2689 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2690 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2691 break;
2692
aef9ec39 2693 default:
7aa54bd7
DD
2694 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2695 event->param.rej_rcvd.reason);
509c07bc 2696 ch->status = -ECONNRESET;
aef9ec39
RD
2697 }
2698}
2699
e7ff98ae
PP
2700static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2701 const struct ib_cm_event *event)
aef9ec39 2702{
509c07bc
BVA
2703 struct srp_rdma_ch *ch = cm_id->context;
2704 struct srp_target_port *target = ch->target;
aef9ec39 2705 int comp = 0;
aef9ec39
RD
2706
2707 switch (event->event) {
2708 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2709 shost_printk(KERN_DEBUG, target->scsi_host,
2710 PFX "Sending CM REQ failed\n");
aef9ec39 2711 comp = 1;
509c07bc 2712 ch->status = -ECONNRESET;
aef9ec39
RD
2713 break;
2714
2715 case IB_CM_REP_RECEIVED:
2716 comp = 1;
509c07bc 2717 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2718 break;
2719
2720 case IB_CM_REJ_RECEIVED:
7aa54bd7 2721 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2722 comp = 1;
2723
19f31343 2724 srp_ib_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2725 break;
2726
b7ac4ab4 2727 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2728 shost_printk(KERN_WARNING, target->scsi_host,
2729 PFX "DREQ received - connection closed\n");
c014c8cd 2730 ch->connected = false;
b7ac4ab4 2731 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2732 shost_printk(KERN_ERR, target->scsi_host,
2733 PFX "Sending CM DREP failed\n");
c1120f89 2734 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2735 break;
2736
2737 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2738 shost_printk(KERN_ERR, target->scsi_host,
2739 PFX "connection closed\n");
ac72d766 2740 comp = 1;
aef9ec39 2741
509c07bc 2742 ch->status = 0;
aef9ec39
RD
2743 break;
2744
b7ac4ab4
IR
2745 case IB_CM_MRA_RECEIVED:
2746 case IB_CM_DREQ_ERROR:
2747 case IB_CM_DREP_RECEIVED:
2748 break;
2749
aef9ec39 2750 default:
7aa54bd7
DD
2751 shost_printk(KERN_WARNING, target->scsi_host,
2752 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2753 break;
2754 }
2755
2756 if (comp)
509c07bc 2757 complete(&ch->done);
aef9ec39 2758
aef9ec39
RD
2759 return 0;
2760}
2761
19f31343
BVA
2762static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2763 struct rdma_cm_event *event)
2764{
2765 struct srp_target_port *target = ch->target;
2766 struct Scsi_Host *shost = target->scsi_host;
2767 int opcode;
2768
2769 switch (event->status) {
2770 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2771 shost_printk(KERN_WARNING, shost,
2772 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2773 ch->status = -ECONNRESET;
2774 break;
2775
2776 case IB_CM_REJ_CONSUMER_DEFINED:
2777 opcode = *(u8 *) event->param.conn.private_data;
2778 if (opcode == SRP_LOGIN_REJ) {
2779 struct srp_login_rej *rej =
2780 (struct srp_login_rej *)
2781 event->param.conn.private_data;
2782 u32 reason = be32_to_cpu(rej->reason);
2783
2784 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2785 shost_printk(KERN_WARNING, shost,
2786 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2787 else
2788 shost_printk(KERN_WARNING, shost,
2789 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2790 } else {
2791 shost_printk(KERN_WARNING, shost,
2792 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2793 opcode);
2794 }
2795 ch->status = -ECONNRESET;
2796 break;
2797
2798 case IB_CM_REJ_STALE_CONN:
2799 shost_printk(KERN_WARNING, shost,
2800 " REJ reason: stale connection\n");
2801 ch->status = SRP_STALE_CONN;
2802 break;
2803
2804 default:
2805 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2806 event->status);
2807 ch->status = -ECONNRESET;
2808 break;
2809 }
2810}
2811
2812static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2813 struct rdma_cm_event *event)
2814{
2815 struct srp_rdma_ch *ch = cm_id->context;
2816 struct srp_target_port *target = ch->target;
2817 int comp = 0;
2818
2819 switch (event->event) {
2820 case RDMA_CM_EVENT_ADDR_RESOLVED:
2821 ch->status = 0;
2822 comp = 1;
2823 break;
2824
2825 case RDMA_CM_EVENT_ADDR_ERROR:
2826 ch->status = -ENXIO;
2827 comp = 1;
2828 break;
2829
2830 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2831 ch->status = 0;
2832 comp = 1;
2833 break;
2834
2835 case RDMA_CM_EVENT_ROUTE_ERROR:
2836 case RDMA_CM_EVENT_UNREACHABLE:
2837 ch->status = -EHOSTUNREACH;
2838 comp = 1;
2839 break;
2840
2841 case RDMA_CM_EVENT_CONNECT_ERROR:
2842 shost_printk(KERN_DEBUG, target->scsi_host,
2843 PFX "Sending CM REQ failed\n");
2844 comp = 1;
2845 ch->status = -ECONNRESET;
2846 break;
2847
2848 case RDMA_CM_EVENT_ESTABLISHED:
2849 comp = 1;
2850 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2851 break;
2852
2853 case RDMA_CM_EVENT_REJECTED:
2854 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2855 comp = 1;
2856
2857 srp_rdma_cm_rej_handler(ch, event);
2858 break;
2859
2860 case RDMA_CM_EVENT_DISCONNECTED:
2861 if (ch->connected) {
2862 shost_printk(KERN_WARNING, target->scsi_host,
2863 PFX "received DREQ\n");
2864 rdma_disconnect(ch->rdma_cm.cm_id);
2865 comp = 1;
2866 ch->status = 0;
2867 queue_work(system_long_wq, &target->tl_err_work);
2868 }
2869 break;
2870
2871 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2872 shost_printk(KERN_ERR, target->scsi_host,
2873 PFX "connection closed\n");
2874
2875 comp = 1;
2876 ch->status = 0;
2877 break;
2878
2879 default:
2880 shost_printk(KERN_WARNING, target->scsi_host,
2881 PFX "Unhandled CM event %d\n", event->event);
2882 break;
2883 }
2884
2885 if (comp)
2886 complete(&ch->done);
2887
2888 return 0;
2889}
2890
71444b97
JW
2891/**
2892 * srp_change_queue_depth - setting device queue depth
2893 * @sdev: scsi device struct
2894 * @qdepth: requested queue depth
71444b97
JW
2895 *
2896 * Returns queue depth.
2897 */
2898static int
db5ed4df 2899srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2900{
c40ecc12 2901 if (!sdev->tagged_supported)
1e6f2416 2902 qdepth = 1;
db5ed4df 2903 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2904}
2905
985aa495 2906static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
0a6fdbde 2907 u8 func, u8 *status)
aef9ec39 2908{
509c07bc 2909 struct srp_target_port *target = ch->target;
a95cadb9 2910 struct srp_rport *rport = target->rport;
19081f31 2911 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2912 struct srp_iu *iu;
2913 struct srp_tsk_mgmt *tsk_mgmt;
0a6fdbde 2914 int res;
aef9ec39 2915
c014c8cd 2916 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2917 return -1;
2918
a95cadb9 2919 /*
509c07bc 2920 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
2921 * invoked while a task management function is being sent.
2922 */
2923 mutex_lock(&rport->mutex);
509c07bc
BVA
2924 spin_lock_irq(&ch->lock);
2925 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2926 spin_unlock_irq(&ch->lock);
76c75b25 2927
a95cadb9
BVA
2928 if (!iu) {
2929 mutex_unlock(&rport->mutex);
2930
76c75b25 2931 return -1;
a95cadb9 2932 }
aef9ec39 2933
882981f4
BVA
2934 iu->num_sge = 1;
2935
19081f31
DD
2936 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2937 DMA_TO_DEVICE);
aef9ec39
RD
2938 tsk_mgmt = iu->buf;
2939 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2940
2941 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2942 int_to_scsilun(lun, &tsk_mgmt->lun);
aef9ec39 2943 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2944 tsk_mgmt->task_tag = req_tag;
aef9ec39 2945
0a6fdbde
BVA
2946 spin_lock_irq(&ch->lock);
2947 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2948 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2949 spin_unlock_irq(&ch->lock);
2950
2951 init_completion(&ch->tsk_mgmt_done);
2952
19081f31
DD
2953 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2954 DMA_TO_DEVICE);
509c07bc
BVA
2955 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2956 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2957 mutex_unlock(&rport->mutex);
2958
76c75b25
BVA
2959 return -1;
2960 }
0a6fdbde
BVA
2961 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2962 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2963 if (res > 0 && status)
2964 *status = ch->tsk_mgmt_status;
a95cadb9 2965 mutex_unlock(&rport->mutex);
d945e1df 2966
0a6fdbde 2967 WARN_ON_ONCE(res < 0);
aef9ec39 2968
0a6fdbde 2969 return res > 0 ? 0 : -1;
d945e1df
RD
2970}
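
/*
 * srp_send_tsk_mgmt() returns 0 if a response was received before
 * SRP_ABORT_TIMEOUT_MS expired and -1 otherwise (not connected, no TX IU
 * available, posting the send failed or the wait timed out).  *status is
 * only updated when a response arrived; its value is taken from the
 * response data in srp_process_rsp().
 */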
2971
aef9ec39
RD
2972static int srp_abort(struct scsi_cmnd *scmnd)
2973{
d945e1df 2974 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2975 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2976 u32 tag;
d92c0da7 2977 u16 ch_idx;
509c07bc 2978 struct srp_rdma_ch *ch;
086f44f5 2979 int ret;
d945e1df 2980
7aa54bd7 2981 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2982
d92c0da7 2983 if (!req)
99b6697a 2984 return SUCCESS;
77f2c1a4 2985 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2986 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2987 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2988 return SUCCESS;
2989 ch = &target->ch[ch_idx];
2990 if (!srp_claim_req(ch, req, NULL, scmnd))
2991 return SUCCESS;
2992 shost_printk(KERN_ERR, target->scsi_host,
2993 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2994 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
0a6fdbde 2995 SRP_TSK_ABORT_TASK, NULL) == 0)
086f44f5 2996 ret = SUCCESS;
ed9b2264 2997 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2998 ret = FAST_IO_FAIL;
086f44f5
BVA
2999 else
3000 ret = FAILED;
e68088e7
BVA
3001 if (ret == SUCCESS) {
3002 srp_free_req(ch, req, scmnd, 0);
3003 scmnd->result = DID_ABORT << 16;
3004 scmnd->scsi_done(scmnd);
3005 }
d945e1df 3006
086f44f5 3007 return ret;
aef9ec39
RD
3008}
3009
3010static int srp_reset_device(struct scsi_cmnd *scmnd)
3011{
d945e1df 3012 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 3013 struct srp_rdma_ch *ch;
0a6fdbde 3014 u8 status;
d945e1df 3015
7aa54bd7 3016 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 3017
d92c0da7 3018 ch = &target->ch[0];
509c07bc 3019 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
0a6fdbde 3020 SRP_TSK_LUN_RESET, &status))
d945e1df 3021 return FAILED;
0a6fdbde 3022 if (status)
d945e1df
RD
3023 return FAILED;
3024
d945e1df 3025 return SUCCESS;
aef9ec39
RD
3026}
3027
3028static int srp_reset_host(struct scsi_cmnd *scmnd)
3029{
3030 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 3031
7aa54bd7 3032 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 3033
ed9b2264 3034 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
3035}
3036
b0780ee5
BVA
3037static int srp_target_alloc(struct scsi_target *starget)
3038{
3039 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3040 struct srp_target_port *target = host_to_target(shost);
3041
3042 if (target->target_can_queue)
3043 starget->can_queue = target->target_can_queue;
3044 return 0;
3045}
3046
509c5f33
BVA
3047static int srp_slave_alloc(struct scsi_device *sdev)
3048{
3049 struct Scsi_Host *shost = sdev->host;
3050 struct srp_target_port *target = host_to_target(shost);
3051 struct srp_device *srp_dev = target->srp_host->srp_dev;
fbd36818 3052 struct ib_device *ibdev = srp_dev->dev;
509c5f33 3053
fbd36818 3054 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
509c5f33
BVA
3055 blk_queue_virt_boundary(sdev->request_queue,
3056 ~srp_dev->mr_page_mask);
3057
3058 return 0;
3059}
3060
c9b03c1a
BVA
3061static int srp_slave_configure(struct scsi_device *sdev)
3062{
3063 struct Scsi_Host *shost = sdev->host;
3064 struct srp_target_port *target = host_to_target(shost);
3065 struct request_queue *q = sdev->request_queue;
3066 unsigned long timeout;
3067
3068 if (sdev->type == TYPE_DISK) {
3069 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
3070 blk_queue_rq_timeout(q, timeout);
3071 }
3072
3073 return 0;
3074}
3075
ee959b00
TJ
3076static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
3077 char *buf)
6ecb0c84 3078{
ee959b00 3079 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3080
45c37cad 3081 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
3082}
3083
ee959b00
TJ
3084static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
3085 char *buf)
6ecb0c84 3086{
ee959b00 3087 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3088
45c37cad 3089 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
3090}
3091
ee959b00
TJ
3092static ssize_t show_service_id(struct device *dev,
3093 struct device_attribute *attr, char *buf)
6ecb0c84 3094{
ee959b00 3095 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3096
19f31343
BVA
3097 if (target->using_rdma_cm)
3098 return -ENOENT;
3099 return sprintf(buf, "0x%016llx\n",
3100 be64_to_cpu(target->ib_cm.service_id));
6ecb0c84
RD
3101}
3102
ee959b00
TJ
3103static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
3104 char *buf)
6ecb0c84 3105{
ee959b00 3106 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 3107
19f31343
BVA
3108 if (target->using_rdma_cm)
3109 return -ENOENT;
3110 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
6ecb0c84
RD
3111}
3112
848b3082
BVA
3113static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
3114 char *buf)
3115{
3116 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3117
747fe000 3118 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
3119}
3120
ee959b00
TJ
3121static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
3122 char *buf)
6ecb0c84 3123{
ee959b00 3124 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 3125 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 3126
19f31343
BVA
3127 if (target->using_rdma_cm)
3128 return -ENOENT;
3129 return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
6ecb0c84
RD
3130}
3131
ee959b00
TJ
3132static ssize_t show_orig_dgid(struct device *dev,
3133 struct device_attribute *attr, char *buf)
3633b3d0 3134{
ee959b00 3135 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 3136
19f31343
BVA
3137 if (target->using_rdma_cm)
3138 return -ENOENT;
3139 return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
3633b3d0
IR
3140}
3141
89de7486
BVA
3142static ssize_t show_req_lim(struct device *dev,
3143 struct device_attribute *attr, char *buf)
3144{
3145 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
3146 struct srp_rdma_ch *ch;
3147 int i, req_lim = INT_MAX;
89de7486 3148
d92c0da7
BVA
3149 for (i = 0; i < target->ch_count; i++) {
3150 ch = &target->ch[i];
3151 req_lim = min(req_lim, ch->req_lim);
3152 }
3153 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
3154}
3155
ee959b00
TJ
3156static ssize_t show_zero_req_lim(struct device *dev,
3157 struct device_attribute *attr, char *buf)
6bfa24fa 3158{
ee959b00 3159 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 3160
6bfa24fa
RD
3161 return sprintf(buf, "%d\n", target->zero_req_lim);
3162}
3163
ee959b00
TJ
3164static ssize_t show_local_ib_port(struct device *dev,
3165 struct device_attribute *attr, char *buf)
ded7f1a1 3166{
ee959b00 3167 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
3168
3169 return sprintf(buf, "%d\n", target->srp_host->port);
3170}
3171
ee959b00
TJ
3172static ssize_t show_local_ib_device(struct device *dev,
3173 struct device_attribute *attr, char *buf)
ded7f1a1 3174{
ee959b00 3175 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 3176
6c854111
JG
3177 return sprintf(buf, "%s\n",
3178 dev_name(&target->srp_host->srp_dev->dev->dev));
ded7f1a1
IR
3179}
3180
d92c0da7
BVA
3181static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
3182 char *buf)
3183{
3184 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3185
3186 return sprintf(buf, "%d\n", target->ch_count);
3187}
3188
4b5e5f41
BVA
3189static ssize_t show_comp_vector(struct device *dev,
3190 struct device_attribute *attr, char *buf)
3191{
3192 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3193
3194 return sprintf(buf, "%d\n", target->comp_vector);
3195}
3196
7bb312e4
VP
3197static ssize_t show_tl_retry_count(struct device *dev,
3198 struct device_attribute *attr, char *buf)
3199{
3200 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3201
3202 return sprintf(buf, "%d\n", target->tl_retry_count);
3203}
3204
49248644
DD
3205static ssize_t show_cmd_sg_entries(struct device *dev,
3206 struct device_attribute *attr, char *buf)
3207{
3208 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3209
3210 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
3211}
3212
c07d424d
DD
3213static ssize_t show_allow_ext_sg(struct device *dev,
3214 struct device_attribute *attr, char *buf)
3215{
3216 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3217
3218 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3219}
3220
ee959b00
TJ
3221static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
3222static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
3223static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
3224static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 3225static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
3226static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
3227static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 3228static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
3229static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
3230static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
3231static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 3232static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 3233static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 3234static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 3235static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 3236static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
3237
3238static struct device_attribute *srp_host_attrs[] = {
3239 &dev_attr_id_ext,
3240 &dev_attr_ioc_guid,
3241 &dev_attr_service_id,
3242 &dev_attr_pkey,
848b3082 3243 &dev_attr_sgid,
ee959b00
TJ
3244 &dev_attr_dgid,
3245 &dev_attr_orig_dgid,
89de7486 3246 &dev_attr_req_lim,
ee959b00
TJ
3247 &dev_attr_zero_req_lim,
3248 &dev_attr_local_ib_port,
3249 &dev_attr_local_ib_device,
d92c0da7 3250 &dev_attr_ch_count,
4b5e5f41 3251 &dev_attr_comp_vector,
7bb312e4 3252 &dev_attr_tl_retry_count,
49248644 3253 &dev_attr_cmd_sg_entries,
c07d424d 3254 &dev_attr_allow_ext_sg,
6ecb0c84
RD
3255 NULL
3256};
3257
aef9ec39
RD
3258static struct scsi_host_template srp_template = {
3259 .module = THIS_MODULE,
b7f008fd
RD
3260 .name = "InfiniBand SRP initiator",
3261 .proc_name = DRV_NAME,
b0780ee5 3262 .target_alloc = srp_target_alloc,
509c5f33 3263 .slave_alloc = srp_slave_alloc,
c9b03c1a 3264 .slave_configure = srp_slave_configure,
aef9ec39
RD
3265 .info = srp_target_info,
3266 .queuecommand = srp_queuecommand,
71444b97 3267 .change_queue_depth = srp_change_queue_depth,
b6a05c82 3268 .eh_timed_out = srp_timed_out,
aef9ec39
RD
3269 .eh_abort_handler = srp_abort,
3270 .eh_device_reset_handler = srp_reset_device,
3271 .eh_host_reset_handler = srp_reset_host,
2742c1da 3272 .skip_settle_delay = true,
49248644 3273 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 3274 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 3275 .this_id = -1,
4d73f95f 3276 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
77f2c1a4 3277 .shost_attrs = srp_host_attrs,
c40ecc12 3278 .track_queue_depth = 1,
aef9ec39
RD
3279};
3280
34aa654e
BVA
3281static int srp_sdev_count(struct Scsi_Host *host)
3282{
3283 struct scsi_device *sdev;
3284 int c = 0;
3285
3286 shost_for_each_device(sdev, host)
3287 c++;
3288
3289 return c;
3290}
3291
bc44bd1d
BVA
3292/*
3293 * Return values:
3294 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3295 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3296 * removal has been scheduled.
3297 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3298 */
aef9ec39
RD
3299static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3300{
3236822b
FT
3301 struct srp_rport_identifiers ids;
3302 struct srp_rport *rport;
3303
34aa654e 3304 target->state = SRP_TARGET_SCANNING;
aef9ec39 3305 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 3306 be64_to_cpu(target->id_ext));
aef9ec39 3307
dee2b82a 3308 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
aef9ec39
RD
3309 return -ENODEV;
3310
3236822b
FT
3311 memcpy(ids.port_id, &target->id_ext, 8);
3312 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 3313 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
3314 rport = srp_rport_add(target->scsi_host, &ids);
3315 if (IS_ERR(rport)) {
3316 scsi_remove_host(target->scsi_host);
3317 return PTR_ERR(rport);
3318 }
3319
dc1bdbd9 3320 rport->lld_data = target;
9dd69a60 3321 target->rport = rport;
dc1bdbd9 3322
b3589fd4 3323 spin_lock(&host->target_lock);
aef9ec39 3324 list_add_tail(&target->list, &host->target_list);
b3589fd4 3325 spin_unlock(&host->target_lock);
aef9ec39 3326
aef9ec39 3327 scsi_scan_target(&target->scsi_host->shost_gendev,
1d645088 3328 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
aef9ec39 3329
c014c8cd
BVA
3330 if (srp_connected_ch(target) < target->ch_count ||
3331 target->qp_in_error) {
34aa654e
BVA
3332 shost_printk(KERN_INFO, target->scsi_host,
3333 PFX "SCSI scan failed - removing SCSI host\n");
3334 srp_queue_remove_work(target);
3335 goto out;
3336 }
3337
cf1acab7 3338 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
34aa654e
BVA
3339 dev_name(&target->scsi_host->shost_gendev),
3340 srp_sdev_count(target->scsi_host));
3341
3342 spin_lock_irq(&target->lock);
3343 if (target->state == SRP_TARGET_SCANNING)
3344 target->state = SRP_TARGET_LIVE;
3345 spin_unlock_irq(&target->lock);
3346
3347out:
aef9ec39
RD
3348 return 0;
3349}
3350
ee959b00 3351static void srp_release_dev(struct device *dev)
aef9ec39
RD
3352{
3353 struct srp_host *host =
ee959b00 3354 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3355
3356 complete(&host->released);
3357}
3358
3359static struct class srp_class = {
3360 .name = "infiniband_srp",
ee959b00 3361 .dev_release = srp_release_dev
aef9ec39
RD
3362};
3363
96fc248a
BVA
3364/**
3365 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
3366 * @host: SRP host.
3367 * @target: SRP target port.
96fc248a
BVA
3368 */
3369static bool srp_conn_unique(struct srp_host *host,
3370 struct srp_target_port *target)
3371{
3372 struct srp_target_port *t;
3373 bool ret = false;
3374
3375 if (target->state == SRP_TARGET_REMOVED)
3376 goto out;
3377
3378 ret = true;
3379
3380 spin_lock(&host->target_lock);
3381 list_for_each_entry(t, &host->target_list, list) {
3382 if (t != target &&
3383 target->id_ext == t->id_ext &&
3384 target->ioc_guid == t->ioc_guid &&
3385 target->initiator_ext == t->initiator_ext) {
3386 ret = false;
3387 break;
3388 }
3389 }
3390 spin_unlock(&host->target_lock);
3391
3392out:
3393 return ret;
3394}
3395
aef9ec39
RD
3396/*
3397 * Target ports are added by writing
3398 *
3399 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3400 * pkey=<P_Key>,service_id=<service ID>
19f31343
BVA
3401 * or
3402 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3403 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
aef9ec39
RD
3404 *
3405 * to the add_target sysfs attribute.
3406 */
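/*
 * Minimal usage sketch (every identifier and value below is hypothetical,
 * not taken from a real fabric): the parameter string is written from user
 * space to the per-port add_target attribute that srp_add_port() registers
 * under the "infiniband_srp" class with the name "srp-<hca>-<port>", e.g.:
 *
 *   echo "id_ext=200500A0B81146A1,ioc_guid=0002C903000F1366,dgid=FE800000000000000002C903000F1367,pkey=FFFF,service_id=0002C903000F1366" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */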
3407enum {
3408 SRP_OPT_ERR = 0,
3409 SRP_OPT_ID_EXT = 1 << 0,
3410 SRP_OPT_IOC_GUID = 1 << 1,
3411 SRP_OPT_DGID = 1 << 2,
3412 SRP_OPT_PKEY = 1 << 3,
3413 SRP_OPT_SERVICE_ID = 1 << 4,
3414 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 3415 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 3416 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 3417 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 3418 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
3419 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3420 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 3421 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 3422 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 3423 SRP_OPT_QUEUE_SIZE = 1 << 14,
19f31343
BVA
3424 SRP_OPT_IP_SRC = 1 << 15,
3425 SRP_OPT_IP_DEST = 1 << 16,
b0780ee5 3426 SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
19f31343
BVA
3427};
3428
3429static unsigned int srp_opt_mandatory[] = {
3430 SRP_OPT_ID_EXT |
3431 SRP_OPT_IOC_GUID |
3432 SRP_OPT_DGID |
3433 SRP_OPT_PKEY |
3434 SRP_OPT_SERVICE_ID,
3435 SRP_OPT_ID_EXT |
3436 SRP_OPT_IOC_GUID |
3437 SRP_OPT_IP_DEST,
aef9ec39
RD
3438};
3439
a447c093 3440static const match_table_t srp_opt_tokens = {
52fb2b50
VP
3441 { SRP_OPT_ID_EXT, "id_ext=%s" },
3442 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3443 { SRP_OPT_DGID, "dgid=%s" },
3444 { SRP_OPT_PKEY, "pkey=%x" },
3445 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3446 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3447 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
b0780ee5 3448 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
0c0450db 3449 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 3450 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 3451 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
3452 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3453 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 3454 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 3455 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 3456 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
19f31343
BVA
3457 { SRP_OPT_IP_SRC, "src=%s" },
3458 { SRP_OPT_IP_DEST, "dest=%s" },
52fb2b50 3459 { SRP_OPT_ERR, NULL }
aef9ec39
RD
3460};
3461
c62adb7d
BVA
3462/**
3463 * srp_parse_in - parse an IP address and port number combination
e37df2d5
BVA
3464 * @net: [in] Network namespace.
3465 * @sa: [out] Address family, IP address and port number.
3466 * @addr_port_str: [in] IP address and port number.
c62adb7d
BVA
3467 *
3468 * Parse the following address formats:
3469 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3470 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3471 */
19f31343
BVA
3472static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3473 const char *addr_port_str)
3474{
c62adb7d
BVA
3475 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3476 char *port_str;
19f31343
BVA
3477 int ret;
3478
3479 if (!addr)
3480 return -ENOMEM;
c62adb7d
BVA
3481 port_str = strrchr(addr, ':');
if (!port_str) {
	kfree(addr);	/* free the kstrdup()ed copy before bailing out */
	return -EINVAL;
}
3484 *port_str++ = '\0';
3485 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3486 if (ret && addr[0]) {
3487 addr_end = addr + strlen(addr) - 1;
3488 if (addr[0] == '[' && *addr_end == ']') {
3489 *addr_end = '\0';
3490 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3491 port_str, sa);
3492 }
3493 }
19f31343 3494 kfree(addr);
c62adb7d 3495 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
19f31343
BVA
3496 return ret;
3497}
3498
3499static int srp_parse_options(struct net *net, const char *buf,
3500 struct srp_target_port *target)
aef9ec39
RD
3501{
3502 char *options, *sep_opt;
3503 char *p;
aef9ec39 3504 substring_t args[MAX_OPT_ARGS];
2a174df0 3505 unsigned long long ull;
aef9ec39
RD
3506 int opt_mask = 0;
3507 int token;
3508 int ret = -EINVAL;
3509 int i;
3510
3511 options = kstrdup(buf, GFP_KERNEL);
3512 if (!options)
3513 return -ENOMEM;
3514
3515 sep_opt = options;
7dcf9c19 3516 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
3517 if (!*p)
3518 continue;
3519
/*
 * Reset ret at the start of each iteration so that the error branches
 * below return -EINVAL instead of a stale 0 left behind by an earlier,
 * successfully parsed option.
 */
ret = -EINVAL;
token = match_token(p, srp_opt_tokens, args);
opt_mask |= token;
3522
3523 switch (token) {
3524 case SRP_OPT_ID_EXT:
3525 p = match_strdup(args);
a20f3a6d
IR
3526 if (!p) {
3527 ret = -ENOMEM;
3528 goto out;
3529 }
2a174df0
BVA
3530 ret = kstrtoull(p, 16, &ull);
3531 if (ret) {
3532 pr_warn("invalid id_ext parameter '%s'\n", p);
3533 kfree(p);
3534 goto out;
3535 }
3536 target->id_ext = cpu_to_be64(ull);
aef9ec39
RD
3537 kfree(p);
3538 break;
3539
3540 case SRP_OPT_IOC_GUID:
3541 p = match_strdup(args);
a20f3a6d
IR
3542 if (!p) {
3543 ret = -ENOMEM;
3544 goto out;
3545 }
2a174df0
BVA
3546 ret = kstrtoull(p, 16, &ull);
3547 if (ret) {
3548 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3549 kfree(p);
3550 goto out;
3551 }
3552 target->ioc_guid = cpu_to_be64(ull);
aef9ec39
RD
3553 kfree(p);
3554 break;
3555
3556 case SRP_OPT_DGID:
3557 p = match_strdup(args);
a20f3a6d
IR
3558 if (!p) {
3559 ret = -ENOMEM;
3560 goto out;
3561 }
aef9ec39 3562 if (strlen(p) != 32) {
e0bda7d8 3563 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3564 kfree(p);
aef9ec39
RD
3565 goto out;
3566 }
3567
19f31343 3568 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
bf17c1c7 3569 kfree(p);
e711f968
AS
3570 if (ret < 0)
3571 goto out;
aef9ec39
RD
3572 break;
3573
3574 case SRP_OPT_PKEY:
3575 if (match_hex(args, &token)) {
e0bda7d8 3576 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3577 goto out;
3578 }
19f31343 3579 target->ib_cm.pkey = cpu_to_be16(token);
aef9ec39
RD
3580 break;
3581
3582 case SRP_OPT_SERVICE_ID:
3583 p = match_strdup(args);
a20f3a6d
IR
3584 if (!p) {
3585 ret = -ENOMEM;
3586 goto out;
3587 }
2a174df0
BVA
3588 ret = kstrtoull(p, 16, &ull);
3589 if (ret) {
3590 pr_warn("bad service_id parameter '%s'\n", p);
3591 kfree(p);
3592 goto out;
3593 }
19f31343
BVA
3594 target->ib_cm.service_id = cpu_to_be64(ull);
3595 kfree(p);
3596 break;
3597
3598 case SRP_OPT_IP_SRC:
3599 p = match_strdup(args);
3600 if (!p) {
3601 ret = -ENOMEM;
3602 goto out;
3603 }
3604 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
3605 if (ret < 0) {
3606 pr_warn("bad source parameter '%s'\n", p);
3607 kfree(p);
3608 goto out;
3609 }
3610 target->rdma_cm.src_specified = true;
3611 kfree(p);
3612 break;
3613
3614 case SRP_OPT_IP_DEST:
3615 p = match_strdup(args);
3616 if (!p) {
3617 ret = -ENOMEM;
3618 goto out;
3619 }
3620 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
3621 if (ret < 0) {
3622 pr_warn("bad dest parameter '%s'\n", p);
3623 kfree(p);
3624 goto out;
3625 }
3626 target->using_rdma_cm = true;
aef9ec39
RD
3627 kfree(p);
3628 break;
3629
3630 case SRP_OPT_MAX_SECT:
3631 if (match_int(args, &token)) {
e0bda7d8 3632 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3633 goto out;
3634 }
3635 target->scsi_host->max_sectors = token;
3636 break;
3637
4d73f95f
BVA
3638 case SRP_OPT_QUEUE_SIZE:
3639 if (match_int(args, &token) || token < 1) {
3640 pr_warn("bad queue_size parameter '%s'\n", p);
3641 goto out;
3642 }
3643 target->scsi_host->can_queue = token;
3644 target->queue_size = token + SRP_RSP_SQ_SIZE +
3645 SRP_TSK_MGMT_SQ_SIZE;
3646 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3647 target->scsi_host->cmd_per_lun = token;
3648 break;
3649
52fb2b50 3650 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3651 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3652 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3653 p);
52fb2b50
VP
3654 goto out;
3655 }
4d73f95f 3656 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3657 break;
3658
b0780ee5
BVA
3659 case SRP_OPT_TARGET_CAN_QUEUE:
3660 if (match_int(args, &token) || token < 1) {
3661 pr_warn("bad max target_can_queue parameter '%s'\n",
3662 p);
3663 goto out;
3664 }
3665 target->target_can_queue = token;
3666 break;
3667
0c0450db
R
3668 case SRP_OPT_IO_CLASS:
3669 if (match_hex(args, &token)) {
e0bda7d8 3670 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3671 goto out;
3672 }
3673 if (token != SRP_REV10_IB_IO_CLASS &&
3674 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3675 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3676 token, SRP_REV10_IB_IO_CLASS,
3677 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3678 goto out;
3679 }
3680 target->io_class = token;
3681 break;
3682
01cb9bcb
IR
3683 case SRP_OPT_INITIATOR_EXT:
3684 p = match_strdup(args);
a20f3a6d
IR
3685 if (!p) {
3686 ret = -ENOMEM;
3687 goto out;
3688 }
2a174df0
BVA
3689 ret = kstrtoull(p, 16, &ull);
3690 if (ret) {
3691 pr_warn("bad initiator_ext value '%s'\n", p);
3692 kfree(p);
3693 goto out;
3694 }
3695 target->initiator_ext = cpu_to_be64(ull);
01cb9bcb
IR
3696 kfree(p);
3697 break;
3698
49248644
DD
3699 case SRP_OPT_CMD_SG_ENTRIES:
3700 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3701 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3702 p);
49248644
DD
3703 goto out;
3704 }
3705 target->cmd_sg_cnt = token;
3706 break;
3707
c07d424d
DD
3708 case SRP_OPT_ALLOW_EXT_SG:
3709 if (match_int(args, &token)) {
e0bda7d8 3710 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3711 goto out;
3712 }
3713 target->allow_ext_sg = !!token;
3714 break;
3715
3716 case SRP_OPT_SG_TABLESIZE:
3717 if (match_int(args, &token) || token < 1 ||
65e8617f 3718 token > SG_MAX_SEGMENTS) {
e0bda7d8
BVA
3719 pr_warn("bad max sg_tablesize parameter '%s'\n",
3720 p);
c07d424d
DD
3721 goto out;
3722 }
3723 target->sg_tablesize = token;
3724 break;
3725
4b5e5f41
BVA
3726 case SRP_OPT_COMP_VECTOR:
3727 if (match_int(args, &token) || token < 0) {
3728 pr_warn("bad comp_vector parameter '%s'\n", p);
3729 goto out;
3730 }
3731 target->comp_vector = token;
3732 break;
3733
7bb312e4
VP
3734 case SRP_OPT_TL_RETRY_COUNT:
3735 if (match_int(args, &token) || token < 2 || token > 7) {
3736 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3737 p);
3738 goto out;
3739 }
3740 target->tl_retry_count = token;
3741 break;
3742
aef9ec39 3743 default:
e0bda7d8
BVA
3744 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3745 p);
aef9ec39
RD
3746 goto out;
3747 }
3748 }
3749
19f31343
BVA
/* ret stays -EINVAL unless one complete set of mandatory options was given. */
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3751 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3752 ret = 0;
3753 break;
3754 }
3755 }
3756 if (ret)
3757 pr_warn("target creation request is missing one or more parameters\n");
aef9ec39 3758
4d73f95f
BVA
3759 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3760 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3761 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3762 target->scsi_host->cmd_per_lun,
3763 target->scsi_host->can_queue);
3764
aef9ec39
RD
3765out:
3766 kfree(options);
3767 return ret;
3768}
3769
ee959b00
TJ
3770static ssize_t srp_create_target(struct device *dev,
3771 struct device_attribute *attr,
aef9ec39
RD
3772 const char *buf, size_t count)
3773{
3774 struct srp_host *host =
ee959b00 3775 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3776 struct Scsi_Host *target_host;
3777 struct srp_target_port *target;
509c07bc 3778 struct srp_rdma_ch *ch;
d1b4289e
BVA
3779 struct srp_device *srp_dev = host->srp_dev;
3780 struct ib_device *ibdev = srp_dev->dev;
d92c0da7 3781 int ret, node_idx, node, cpu, i;
509c5f33 3782 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
d92c0da7 3783 bool multich = false;
513d5647 3784 uint32_t max_iu_len;
aef9ec39
RD
3785
3786 target_host = scsi_host_alloc(&srp_template,
3787 sizeof (struct srp_target_port));
3788 if (!target_host)
3789 return -ENOMEM;
3790
49248644 3791 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3792 target_host->max_channel = 0;
3793 target_host->max_id = 1;
985aa495 3794 target_host->max_lun = -1LL;
3c8edf0e 3795 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
0b5cb330 3796 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
5f068992 3797
aef9ec39 3798 target = host_to_target(target_host);
aef9ec39 3799
19f31343 3800 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
49248644
DD
3801 target->io_class = SRP_REV16A_IB_IO_CLASS;
3802 target->scsi_host = target_host;
3803 target->srp_host = host;
e6bf5f48 3804 target->lkey = host->srp_dev->pd->local_dma_lkey;
cee687b6 3805 target->global_rkey = host->srp_dev->global_rkey;
49248644 3806 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3807 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3808 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3809 target->tl_retry_count = 7;
4d73f95f 3810 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3811
34aa654e
BVA
3812 /*
3813 * Prevent the SCSI host from being removed by srp_remove_target()
3814 * before this function returns.
3815 */
3816 scsi_host_get(target->scsi_host);
3817
4fa354c9
BVA
3818 ret = mutex_lock_interruptible(&host->add_target_mutex);
3819 if (ret < 0)
3820 goto put;
2d7091bc 3821
19f31343 3822 ret = srp_parse_options(target->net, buf, target);
aef9ec39 3823 if (ret)
fb49c8bb 3824 goto out;
aef9ec39 3825
4d73f95f
BVA
3826 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3827
96fc248a 3828 if (!srp_conn_unique(target->srp_host, target)) {
19f31343 3829 if (target->using_rdma_cm) {
19f31343 3830 shost_printk(KERN_INFO, target->scsi_host,
7da09af9 3831 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
19f31343
BVA
3832 be64_to_cpu(target->id_ext),
3833 be64_to_cpu(target->ioc_guid),
7da09af9 3834 &target->rdma_cm.dst);
19f31343
BVA
3835 } else {
3836 shost_printk(KERN_INFO, target->scsi_host,
3837 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3838 be64_to_cpu(target->id_ext),
3839 be64_to_cpu(target->ioc_guid),
3840 be64_to_cpu(target->initiator_ext));
3841 }
96fc248a 3842 ret = -EEXIST;
fb49c8bb 3843 goto out;
96fc248a
BVA
3844 }
3845
5cfb1782 3846 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3847 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3848 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3849 target->sg_tablesize = target->cmd_sg_cnt;
3850 }
3851
509c5f33 3852 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
fbd36818
SG
3853 bool gaps_reg = (ibdev->attrs.device_cap_flags &
3854 IB_DEVICE_SG_GAPS_REG);
3855
509c5f33
BVA
3856 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3857 (ilog2(srp_dev->mr_page_size) - 9);
fbd36818
SG
3858 if (!gaps_reg) {
3859 /*
3860 * FR and FMR can only map one HCA page per entry. If
3861 * the start address is not aligned on a HCA page
3862 * boundary, two entries will be used for the head and
3863 * the tail, although these two entries combined
3864 * contain at most one HCA page of data. Hence the "+
3865 * 1" in the calculation below.
3866 *
3867 * The indirect data buffer descriptor is contiguous
3868 * so the memory for that buffer will only be
3869 * registered if register_always is true. Hence add
3870 * one to mr_per_cmd if register_always has been set.
3871 */
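/*
 * Worked example with purely illustrative numbers (not tied to any
 * particular HCA): with mr_page_size = 4096 and max_pages_per_mr = 256,
 * max_sectors_per_mr = 256 << (12 - 9) = 2048. For max_sectors = 1024
 * and register_always enabled, the line below gives
 * mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
 */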
3872 mr_per_cmd = register_always +
3873 (target->scsi_host->max_sectors + 1 +
3874 max_sectors_per_mr - 1) / max_sectors_per_mr;
3875 } else {
3876 mr_per_cmd = register_always +
3877 (target->sg_tablesize +
3878 srp_dev->max_pages_per_mr - 1) /
3879 srp_dev->max_pages_per_mr;
3880 }
509c5f33 3881 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
fbd36818 3882 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
509c5f33
BVA
3883 max_sectors_per_mr, mr_per_cmd);
3884 }
3885
c07d424d 3886 target_host->sg_tablesize = target->sg_tablesize;
509c5f33
BVA
3887 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3888 target->mr_per_cmd = mr_per_cmd;
c07d424d
DD
3889 target->indirect_size = target->sg_tablesize *
3890 sizeof (struct srp_direct_buf);
882981f4 3891 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
49248644 3892
c1120f89 3893 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3894 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3895 spin_lock_init(&target->lock);
1dfce294 3896 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66 3897 if (ret)
fb49c8bb 3898 goto out;
aef9ec39 3899
d92c0da7
BVA
3900 ret = -ENOMEM;
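/*
 * Illustrative sizing example (hypothetical machine): with the ch_count
 * module parameter left at 0, two online NUMA nodes, 16 online CPUs and
 * an HCA exposing 8 completion vectors, the computation below yields
 * max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels.
 */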
3901 target->ch_count = max_t(unsigned, num_online_nodes(),
3902 min(ch_count ? :
3903 min(4 * num_online_nodes(),
3904 ibdev->num_comp_vectors),
3905 num_online_cpus()));
3906 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3907 GFP_KERNEL);
3908 if (!target->ch)
fb49c8bb 3909 goto out;
aef9ec39 3910
d92c0da7
BVA
3911 node_idx = 0;
3912 for_each_online_node(node) {
3913 const int ch_start = (node_idx * target->ch_count /
3914 num_online_nodes());
3915 const int ch_end = ((node_idx + 1) * target->ch_count /
3916 num_online_nodes());
3a148896
BVA
3917 const int cv_start = node_idx * ibdev->num_comp_vectors /
3918 num_online_nodes();
3919 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3920 num_online_nodes();
d92c0da7
BVA
3921 int cpu_idx = 0;
3922
3923 for_each_online_cpu(cpu) {
3924 if (cpu_to_node(cpu) != node)
3925 continue;
3926 if (ch_start + cpu_idx >= ch_end)
3927 continue;
3928 ch = &target->ch[ch_start + cpu_idx];
3929 ch->target = target;
3930 ch->comp_vector = cv_start == cv_end ? cv_start :
3931 cv_start + cpu_idx % (cv_end - cv_start);
3932 spin_lock_init(&ch->lock);
3933 INIT_LIST_HEAD(&ch->free_tx);
3934 ret = srp_new_cm_id(ch);
3935 if (ret)
3936 goto err_disconnect;
aef9ec39 3937
d92c0da7
BVA
3938 ret = srp_create_ch_ib(ch);
3939 if (ret)
3940 goto err_disconnect;
3941
3942 ret = srp_alloc_req_data(ch);
3943 if (ret)
3944 goto err_disconnect;
3945
513d5647 3946 ret = srp_connect_ch(ch, max_iu_len, multich);
d92c0da7 3947 if (ret) {
19f31343
BVA
3948 char dst[64];
3949
3950 if (target->using_rdma_cm)
7da09af9
BVA
3951 snprintf(dst, sizeof(dst), "%pIS",
3952 &target->rdma_cm.dst);
19f31343
BVA
3953 else
3954 snprintf(dst, sizeof(dst), "%pI6",
3955 target->ib_cm.orig_dgid.raw);
d92c0da7 3956 shost_printk(KERN_ERR, target->scsi_host,
19f31343 3957 PFX "Connection %d/%d to %s failed\n",
d92c0da7 3958 ch_start + cpu_idx,
19f31343 3959 target->ch_count, dst);
d92c0da7 3960 if (node_idx == 0 && cpu_idx == 0) {
b02c1536 3961 goto free_ch;
d92c0da7
BVA
3962 } else {
3963 srp_free_ch_ib(target, ch);
3964 srp_free_req_data(target, ch);
3965 target->ch_count = ch - target->ch;
c257ea6f 3966 goto connected;
d92c0da7
BVA
3967 }
3968 }
3969
3970 multich = true;
3971 cpu_idx++;
3972 }
3973 node_idx++;
aef9ec39
RD
3974 }
3975
c257ea6f 3976connected:
d92c0da7
BVA
3977 target->scsi_host->nr_hw_queues = target->ch_count;
3978
aef9ec39
RD
3979 ret = srp_add_target(host, target);
3980 if (ret)
3981 goto err_disconnect;
3982
34aa654e 3983 if (target->state != SRP_TARGET_REMOVED) {
19f31343 3984 if (target->using_rdma_cm) {
19f31343 3985 shost_printk(KERN_DEBUG, target->scsi_host, PFX
7da09af9 3986 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
19f31343
BVA
3987 be64_to_cpu(target->id_ext),
3988 be64_to_cpu(target->ioc_guid),
7da09af9 3989 target->sgid.raw, &target->rdma_cm.dst);
19f31343
BVA
3990 } else {
3991 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3992 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3993 be64_to_cpu(target->id_ext),
3994 be64_to_cpu(target->ioc_guid),
3995 be16_to_cpu(target->ib_cm.pkey),
3996 be64_to_cpu(target->ib_cm.service_id),
3997 target->sgid.raw,
3998 target->ib_cm.orig_dgid.raw);
3999 }
34aa654e 4000 }
e7ffde01 4001
2d7091bc
BVA
4002 ret = count;
4003
4004out:
4005 mutex_unlock(&host->add_target_mutex);
34aa654e 4006
4fa354c9 4007put:
34aa654e 4008 scsi_host_put(target->scsi_host);
19f31343
BVA
4009 if (ret < 0) {
4010 /*
4011 * If a call to srp_remove_target() has not been scheduled,
4012 * drop the network namespace reference that was obtained
4013 * earlier in this function.
4014 */
4015 if (target->state != SRP_TARGET_REMOVED)
4016 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
bc44bd1d 4017 scsi_host_put(target->scsi_host);
19f31343 4018 }
34aa654e 4019
2d7091bc 4020 return ret;
aef9ec39
RD
4021
4022err_disconnect:
4023 srp_disconnect_target(target);
4024
b02c1536 4025free_ch:
d92c0da7
BVA
4026 for (i = 0; i < target->ch_count; i++) {
4027 ch = &target->ch[i];
4028 srp_free_ch_ib(target, ch);
4029 srp_free_req_data(target, ch);
4030 }
aef9ec39 4031
d92c0da7 4032 kfree(target->ch);
2d7091bc 4033 goto out;
aef9ec39
RD
4034}
4035
ee959b00 4036static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 4037
ee959b00
TJ
4038static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
4039 char *buf)
aef9ec39 4040{
ee959b00 4041 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 4042
6c854111 4043 return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
aef9ec39
RD
4044}
4045
ee959b00 4046static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 4047
ee959b00
TJ
4048static ssize_t show_port(struct device *dev, struct device_attribute *attr,
4049 char *buf)
aef9ec39 4050{
ee959b00 4051 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
4052
4053 return sprintf(buf, "%d\n", host->port);
4054}
4055
ee959b00 4056static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 4057
f5358a17 4058static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
4059{
4060 struct srp_host *host;
4061
4062 host = kzalloc(sizeof *host, GFP_KERNEL);
4063 if (!host)
4064 return NULL;
4065
4066 INIT_LIST_HEAD(&host->target_list);
b3589fd4 4067 spin_lock_init(&host->target_lock);
aef9ec39 4068 init_completion(&host->released);
2d7091bc 4069 mutex_init(&host->add_target_mutex);
05321937 4070 host->srp_dev = device;
aef9ec39
RD
4071 host->port = port;
4072
ee959b00 4073 host->dev.class = &srp_class;
dee2b82a 4074 host->dev.parent = device->dev->dev.parent;
6c854111
JG
4075 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
4076 port);
aef9ec39 4077
ee959b00 4078 if (device_register(&host->dev))
f5358a17 4079 goto free_host;
ee959b00 4080 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 4081 goto err_class;
ee959b00 4082 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 4083 goto err_class;
ee959b00 4084 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
4085 goto err_class;
4086
4087 return host;
4088
4089err_class:
ee959b00 4090 device_unregister(&host->dev);
aef9ec39 4091
f5358a17 4092free_host:
aef9ec39
RD
4093 kfree(host);
4094
4095 return NULL;
4096}
4097
4098static void srp_add_one(struct ib_device *device)
4099{
f5358a17 4100 struct srp_device *srp_dev;
042dd765 4101 struct ib_device_attr *attr = &device->attrs;
aef9ec39 4102 struct srp_host *host;
ea1075ed
JG
4103 int mr_page_shift;
4104 unsigned int p;
52ede08f 4105 u64 max_pages_per_mr;
5f071777 4106 unsigned int flags = 0;
aef9ec39 4107
249f0656 4108 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
f5358a17 4109 if (!srp_dev)
4a061b28 4110 return;
f5358a17
RD
4111
4112 /*
4113 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
4114 * minimum of 4096 bytes. We're unlikely to build large sglists
4115 * out of smaller entries.
f5358a17 4116 */
042dd765 4117 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
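/*
 * Example (hypothetical capability mask): a page_size_cap of 0xfffff000,
 * i.e. 4 KiB pages and larger, gives ffs() == 13, so the line above yields
 * mr_page_shift = max(12, 13 - 1) = 12 and srp_dev->mr_page_size becomes
 * 4096 bytes.
 */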
52ede08f
BVA
4118 srp_dev->mr_page_size = 1 << mr_page_shift;
4119 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
042dd765 4120 max_pages_per_mr = attr->max_mr_size;
52ede08f 4121 do_div(max_pages_per_mr, srp_dev->mr_page_size);
509c5f33 4122 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
042dd765 4123 attr->max_mr_size, srp_dev->mr_page_size,
509c5f33 4124 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
52ede08f
BVA
4125 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4126 max_pages_per_mr);
835ee624 4127
3023a1e9
KH
4128 srp_dev->has_fmr = (device->ops.alloc_fmr &&
4129 device->ops.dealloc_fmr &&
4130 device->ops.map_phys_fmr &&
4131 device->ops.unmap_fmr);
042dd765 4132 srp_dev->has_fr = (attr->device_cap_flags &
835ee624 4133 IB_DEVICE_MEM_MGT_EXTENSIONS);
c222a39f 4134 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
835ee624 4135 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
c222a39f 4136 } else if (!never_register &&
042dd765 4137 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
509c5f33
BVA
4138 srp_dev->use_fast_reg = (srp_dev->has_fr &&
4139 (!srp_dev->has_fmr || prefer_fr));
4140 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
4141 }
835ee624 4142
5f071777
CH
4143 if (never_register || !register_always ||
4144 (!srp_dev->has_fmr && !srp_dev->has_fr))
4145 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4146
5cfb1782
BVA
4147 if (srp_dev->use_fast_reg) {
4148 srp_dev->max_pages_per_mr =
4149 min_t(u32, srp_dev->max_pages_per_mr,
042dd765 4150 attr->max_fast_reg_page_list_len);
5cfb1782 4151 }
52ede08f
BVA
4152 srp_dev->mr_max_size = srp_dev->mr_page_size *
4153 srp_dev->max_pages_per_mr;
4a061b28 4154 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
6c854111 4155 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
042dd765 4156 attr->max_fast_reg_page_list_len,
52ede08f 4157 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
4158
4159 INIT_LIST_HEAD(&srp_dev->dev_list);
4160
4161 srp_dev->dev = device;
5f071777 4162 srp_dev->pd = ib_alloc_pd(device, flags);
f5358a17
RD
4163 if (IS_ERR(srp_dev->pd))
4164 goto free_dev;
4165
cee687b6
BVA
4166 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4167 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4168 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4169 }
f5358a17 4170
ea1075ed 4171 rdma_for_each_port (device, p) {
f5358a17 4172 host = srp_add_port(srp_dev, p);
aef9ec39 4173 if (host)
f5358a17 4174 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
4175 }
4176
f5358a17 4177 ib_set_client_data(device, &srp_client, srp_dev);
4a061b28 4178 return;
f5358a17 4179
f5358a17
RD
4180free_dev:
4181 kfree(srp_dev);
aef9ec39
RD
4182}
4183
7c1eb45a 4184static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 4185{
f5358a17 4186 struct srp_device *srp_dev;
aef9ec39 4187 struct srp_host *host, *tmp_host;
ef6c49d8 4188 struct srp_target_port *target;
aef9ec39 4189
7c1eb45a 4190 srp_dev = client_data;
1fe0cb84
DB
4191 if (!srp_dev)
4192 return;
aef9ec39 4193
f5358a17 4194 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 4195 device_unregister(&host->dev);
aef9ec39
RD
4196 /*
4197 * Wait for the sysfs entry to go away, so that no new
4198 * target ports can be created.
4199 */
4200 wait_for_completion(&host->released);
4201
4202 /*
ef6c49d8 4203 * Remove all target ports.
aef9ec39 4204 */
b3589fd4 4205 spin_lock(&host->target_lock);
ef6c49d8
BVA
4206 list_for_each_entry(target, &host->target_list, list)
4207 srp_queue_remove_work(target);
b3589fd4 4208 spin_unlock(&host->target_lock);
aef9ec39
RD
4209
4210 /*
bcc05910 4211 * Wait for tl_err and target port removal tasks.
aef9ec39 4212 */
ef6c49d8 4213 flush_workqueue(system_long_wq);
bcc05910 4214 flush_workqueue(srp_remove_wq);
aef9ec39 4215
aef9ec39
RD
4216 kfree(host);
4217 }
4218
f5358a17
RD
4219 ib_dealloc_pd(srp_dev->pd);
4220
4221 kfree(srp_dev);
aef9ec39
RD
4222}
4223
3236822b 4224static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
4225 .has_rport_state = true,
4226 .reset_timer_if_blocked = true,
a95cadb9 4227 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
4228 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4229 .dev_loss_tmo = &srp_dev_loss_tmo,
4230 .reconnect = srp_rport_reconnect,
dc1bdbd9 4231 .rport_delete = srp_rport_delete,
ed9b2264 4232 .terminate_rport_io = srp_terminate_io,
3236822b
FT
4233};
4234
aef9ec39
RD
4235static int __init srp_init_module(void)
4236{
4237 int ret;
4238
16d14e01
BVA
4239 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4240 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4241 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4242 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4243
49248644 4244 if (srp_sg_tablesize) {
e0bda7d8 4245 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
4246 if (!cmd_sg_entries)
4247 cmd_sg_entries = srp_sg_tablesize;
4248 }
4249
4250 if (!cmd_sg_entries)
4251 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4252
4253 if (cmd_sg_entries > 255) {
e0bda7d8 4254 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 4255 cmd_sg_entries = 255;
1e89a194
DD
4256 }
4257
c07d424d
DD
4258 if (!indirect_sg_entries)
4259 indirect_sg_entries = cmd_sg_entries;
4260 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
4261 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4262 cmd_sg_entries);
c07d424d
DD
4263 indirect_sg_entries = cmd_sg_entries;
4264 }
4265
0a475ef4
IR
4266 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4267 pr_warn("Clamping indirect_sg_entries to %u\n",
4268 SG_MAX_SEGMENTS);
4269 indirect_sg_entries = SG_MAX_SEGMENTS;
4270 }
4271
bcc05910 4272 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
4273 if (!srp_remove_wq) {
4274 ret = -ENOMEM;
bcc05910
BVA
4275 goto out;
4276 }
4277
4278 ret = -ENOMEM;
3236822b
FT
4279 ib_srp_transport_template =
4280 srp_attach_transport(&ib_srp_transport_functions);
4281 if (!ib_srp_transport_template)
bcc05910 4282 goto destroy_wq;
3236822b 4283
aef9ec39
RD
4284 ret = class_register(&srp_class);
4285 if (ret) {
e0bda7d8 4286 pr_err("couldn't register class infiniband_srp\n");
bcc05910 4287 goto release_tr;
aef9ec39
RD
4288 }
4289
c1a0b23b
MT
4290 ib_sa_register_client(&srp_sa_client);
4291
aef9ec39
RD
4292 ret = ib_register_client(&srp_client);
4293 if (ret) {
e0bda7d8 4294 pr_err("couldn't register IB client\n");
bcc05910 4295 goto unreg_sa;
aef9ec39
RD
4296 }
4297
bcc05910
BVA
4298out:
4299 return ret;
4300
4301unreg_sa:
4302 ib_sa_unregister_client(&srp_sa_client);
4303 class_unregister(&srp_class);
4304
4305release_tr:
4306 srp_release_transport(ib_srp_transport_template);
4307
4308destroy_wq:
4309 destroy_workqueue(srp_remove_wq);
4310 goto out;
aef9ec39
RD
4311}
4312
4313static void __exit srp_cleanup_module(void)
4314{
4315 ib_unregister_client(&srp_client);
c1a0b23b 4316 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 4317 class_unregister(&srp_class);
3236822b 4318 srp_release_transport(ib_srp_transport_template);
bcc05910 4319 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
4320}
4321
4322module_init(srp_init_module);
4323module_exit(srp_cleanup_module);