IB/srp: Introduce srp_device.use_fmr
drivers/infiniband/ulp/srp/ib_srp.c
1 /*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
44
45 #include <linux/atomic.h>
46
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
51 #include <scsi/srp.h>
52 #include <scsi/scsi_transport_srp.h>
53
54 #include "ib_srp.h"
55
56 #define DRV_NAME "ib_srp"
57 #define PFX DRV_NAME ": "
58 #define DRV_VERSION "2.0"
59 #define DRV_RELDATE "July 26, 2015"
60
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 MODULE_VERSION(DRV_VERSION);
65 MODULE_INFO(release_date, DRV_RELDATE);
66
67 static unsigned int srp_sg_tablesize;
68 static unsigned int cmd_sg_entries;
69 static unsigned int indirect_sg_entries;
70 static bool allow_ext_sg;
71 static bool prefer_fr;
72 static bool register_always;
73 static int topspin_workarounds = 1;
74
75 module_param(srp_sg_tablesize, uint, 0444);
76 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
77
78 module_param(cmd_sg_entries, uint, 0444);
79 MODULE_PARM_DESC(cmd_sg_entries,
80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
81
82 module_param(indirect_sg_entries, uint, 0444);
83 MODULE_PARM_DESC(indirect_sg_entries,
84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85
86 module_param(allow_ext_sg, bool, 0444);
87 MODULE_PARM_DESC(allow_ext_sg,
88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89
90 module_param(topspin_workarounds, int, 0444);
91 MODULE_PARM_DESC(topspin_workarounds,
92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93
94 module_param(prefer_fr, bool, 0444);
95 MODULE_PARM_DESC(prefer_fr,
96 "Whether to use fast registration if both FMR and fast registration are supported");
97
98 module_param(register_always, bool, 0444);
99 MODULE_PARM_DESC(register_always,
100 "Use memory registration even for contiguous memory regions");
101
102 static const struct kernel_param_ops srp_tmo_ops;
103
104 static int srp_reconnect_delay = 10;
105 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108
109 static int srp_fast_io_fail_tmo = 15;
110 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 S_IRUGO | S_IWUSR);
112 MODULE_PARM_DESC(fast_io_fail_tmo,
113 "Number of seconds between the observation of a transport"
114 " layer error and failing all I/O. \"off\" means that this"
115 " functionality is disabled.");
116
117 static int srp_dev_loss_tmo = 600;
118 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 S_IRUGO | S_IWUSR);
120 MODULE_PARM_DESC(dev_loss_tmo,
121 "Maximum number of seconds that the SRP transport should"
122 " insulate transport layer errors. After this time has been"
123 " exceeded the SCSI host is removed. Should be"
124 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 " this functionality is disabled.");
127
128 static unsigned ch_count;
129 module_param(ch_count, uint, 0444);
130 MODULE_PARM_DESC(ch_count,
131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132
133 static void srp_add_one(struct ib_device *device);
134 static void srp_remove_one(struct ib_device *device, void *client_data);
135 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
136 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
137 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138
139 static struct scsi_transport_template *ib_srp_transport_template;
140 static struct workqueue_struct *srp_remove_wq;
141
142 static struct ib_client srp_client = {
143 .name = "srp",
144 .add = srp_add_one,
145 .remove = srp_remove_one
146 };
147
148 static struct ib_sa_client srp_sa_client;
149
150 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151 {
152 int tmo = *(int *)kp->arg;
153
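	/*
	 * A negative timeout value means that the corresponding functionality
	 * is disabled; report it as "off".
	 */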
154 if (tmo >= 0)
155 return sprintf(buffer, "%d", tmo);
156 else
157 return sprintf(buffer, "off");
158 }
159
160 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161 {
162 int tmo, res;
163
164 res = srp_parse_tmo(&tmo, val);
165 if (res)
166 goto out;
167
168 if (kp->arg == &srp_reconnect_delay)
169 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
170 srp_dev_loss_tmo);
171 else if (kp->arg == &srp_fast_io_fail_tmo)
172 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
173 else
174 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
175 tmo);
176 if (res)
177 goto out;
178 *(int *)kp->arg = tmo;
179
180 out:
181 return res;
182 }
183
184 static const struct kernel_param_ops srp_tmo_ops = {
185 .get = srp_tmo_get,
186 .set = srp_tmo_set,
187 };
188
189 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
190 {
191 return (struct srp_target_port *) host->hostdata;
192 }
193
194 static const char *srp_target_info(struct Scsi_Host *host)
195 {
196 return host_to_target(host)->target_name;
197 }
198
199 static int srp_target_is_topspin(struct srp_target_port *target)
200 {
201 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
202 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
203
204 return topspin_workarounds &&
205 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
206 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
207 }
208
209 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
210 gfp_t gfp_mask,
211 enum dma_data_direction direction)
212 {
213 struct srp_iu *iu;
214
215 iu = kmalloc(sizeof *iu, gfp_mask);
216 if (!iu)
217 goto out;
218
219 iu->buf = kzalloc(size, gfp_mask);
220 if (!iu->buf)
221 goto out_free_iu;
222
223 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
224 direction);
225 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
226 goto out_free_buf;
227
228 iu->size = size;
229 iu->direction = direction;
230
231 return iu;
232
233 out_free_buf:
234 kfree(iu->buf);
235 out_free_iu:
236 kfree(iu);
237 out:
238 return NULL;
239 }
240
241 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
242 {
243 if (!iu)
244 return;
245
246 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
247 iu->direction);
248 kfree(iu->buf);
249 kfree(iu);
250 }
251
252 static void srp_qp_event(struct ib_event *event, void *context)
253 {
254 pr_debug("QP event %s (%d)\n",
255 ib_event_msg(event->event), event->event);
256 }
257
258 static int srp_init_qp(struct srp_target_port *target,
259 struct ib_qp *qp)
260 {
261 struct ib_qp_attr *attr;
262 int ret;
263
264 attr = kmalloc(sizeof *attr, GFP_KERNEL);
265 if (!attr)
266 return -ENOMEM;
267
268 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
269 target->srp_host->port,
270 be16_to_cpu(target->pkey),
271 &attr->pkey_index);
272 if (ret)
273 goto out;
274
275 attr->qp_state = IB_QPS_INIT;
276 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
277 IB_ACCESS_REMOTE_WRITE);
278 attr->port_num = target->srp_host->port;
279
280 ret = ib_modify_qp(qp, attr,
281 IB_QP_STATE |
282 IB_QP_PKEY_INDEX |
283 IB_QP_ACCESS_FLAGS |
284 IB_QP_PORT);
285
286 out:
287 kfree(attr);
288 return ret;
289 }
290
291 static int srp_new_cm_id(struct srp_rdma_ch *ch)
292 {
293 struct srp_target_port *target = ch->target;
294 struct ib_cm_id *new_cm_id;
295
296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
297 srp_cm_handler, ch);
298 if (IS_ERR(new_cm_id))
299 return PTR_ERR(new_cm_id);
300
301 if (ch->cm_id)
302 ib_destroy_cm_id(ch->cm_id);
303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
308
309 return 0;
310 }
311
312 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
313 {
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
316
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
320 fmr_param.cache = 1;
321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
326
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
328 }
329
330 /**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335 {
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
343 if (d->frpl)
344 ib_free_fast_reg_page_list(d->frpl);
345 if (d->mr)
346 ib_dereg_mr(d->mr);
347 }
348 kfree(pool);
349 }
350
351 /**
352 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
353 * @device: IB device to allocate fast registration descriptors for.
354 * @pd: Protection domain associated with the FR descriptors.
355 * @pool_size: Number of descriptors to allocate.
356 * @max_page_list_len: Maximum fast registration work request page list length.
357 */
358 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
359 struct ib_pd *pd, int pool_size,
360 int max_page_list_len)
361 {
362 struct srp_fr_pool *pool;
363 struct srp_fr_desc *d;
364 struct ib_mr *mr;
365 struct ib_fast_reg_page_list *frpl;
366 int i, ret = -EINVAL;
367
368 if (pool_size <= 0)
369 goto err;
370 ret = -ENOMEM;
371 pool = kzalloc(sizeof(struct srp_fr_pool) +
372 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
373 if (!pool)
374 goto err;
375 pool->size = pool_size;
376 pool->max_page_list_len = max_page_list_len;
377 spin_lock_init(&pool->lock);
378 INIT_LIST_HEAD(&pool->free_list);
379
380 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
381 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
382 max_page_list_len);
383 if (IS_ERR(mr)) {
384 ret = PTR_ERR(mr);
385 goto destroy_pool;
386 }
387 d->mr = mr;
388 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
389 if (IS_ERR(frpl)) {
390 ret = PTR_ERR(frpl);
391 goto destroy_pool;
392 }
393 d->frpl = frpl;
394 list_add_tail(&d->entry, &pool->free_list);
395 }
396
397 out:
398 return pool;
399
400 destroy_pool:
401 srp_destroy_fr_pool(pool);
402
403 err:
404 pool = ERR_PTR(ret);
405 goto out;
406 }
407
408 /**
409 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
410 * @pool: Pool to obtain descriptor from.
411 */
412 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
413 {
414 struct srp_fr_desc *d = NULL;
415 unsigned long flags;
416
417 spin_lock_irqsave(&pool->lock, flags);
418 if (!list_empty(&pool->free_list)) {
419 d = list_first_entry(&pool->free_list, typeof(*d), entry);
420 list_del(&d->entry);
421 }
422 spin_unlock_irqrestore(&pool->lock, flags);
423
424 return d;
425 }
426
427 /**
428 * srp_fr_pool_put() - put an FR descriptor back in the free list
429 * @pool: Pool the descriptor was allocated from.
430 * @desc: Pointer to an array of fast registration descriptor pointers.
431 * @n: Number of descriptors to put back.
432 *
433 * Note: The caller must already have queued an invalidation request for
434 * desc->mr->rkey before calling this function.
435 */
436 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
437 int n)
438 {
439 unsigned long flags;
440 int i;
441
442 spin_lock_irqsave(&pool->lock, flags);
443 for (i = 0; i < n; i++)
444 list_add(&desc[i]->entry, &pool->free_list);
445 spin_unlock_irqrestore(&pool->lock, flags);
446 }
447
448 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
449 {
450 struct srp_device *dev = target->srp_host->srp_dev;
451
452 return srp_create_fr_pool(dev->dev, dev->pd,
453 target->scsi_host->can_queue,
454 dev->max_pages_per_mr);
455 }
456
457 /**
458 * srp_destroy_qp() - destroy an RDMA queue pair
459 * @ch: SRP RDMA channel.
460 *
461 * Change a queue pair into the error state and wait until all receive
462  * completions have been processed before destroying it. This prevents
463  * the receive completion handler from accessing the queue pair while it
464  * is being destroyed.
465 */
466 static void srp_destroy_qp(struct srp_rdma_ch *ch)
467 {
468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
471 int ret;
472
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
474 WARN_ON_ONCE(ch->connected);
475
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
477 	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
478 if (ret)
479 goto out;
480
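	/*
	 * The QP is already in the error state, so the marker receive WR
	 * posted below completes with a flush error once all earlier receives
	 * have been drained; srp_handle_qp_err() then completes ch->done for
	 * SRP_LAST_WR_ID.
	 */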
481 init_completion(&ch->done);
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
483 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
484 if (ret == 0)
485 wait_for_completion(&ch->done);
486
487 out:
488 ib_destroy_qp(ch->qp);
489 }
490
491 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
492 {
493 struct srp_target_port *target = ch->target;
494 struct srp_device *dev = target->srp_host->srp_dev;
495 struct ib_qp_init_attr *init_attr;
496 struct ib_cq *recv_cq, *send_cq;
497 struct ib_qp *qp;
498 struct ib_fmr_pool *fmr_pool = NULL;
499 struct srp_fr_pool *fr_pool = NULL;
500 const int m = 1 + dev->use_fast_reg;
501 struct ib_cq_init_attr cq_attr = {};
502 int ret;
503
504 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
505 if (!init_attr)
506 return -ENOMEM;
507
508 /* + 1 for SRP_LAST_WR_ID */
509 cq_attr.cqe = target->queue_size + 1;
510 cq_attr.comp_vector = ch->comp_vector;
511 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
512 &cq_attr);
513 if (IS_ERR(recv_cq)) {
514 ret = PTR_ERR(recv_cq);
515 goto err;
516 }
517
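	/*
	 * When fast registration is used (m == 2), each command may also post
	 * a memory registration work request, so size the send CQ and the
	 * send queue at twice the target queue size.
	 */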
518 cq_attr.cqe = m * target->queue_size;
519 cq_attr.comp_vector = ch->comp_vector;
520 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
521 &cq_attr);
522 if (IS_ERR(send_cq)) {
523 ret = PTR_ERR(send_cq);
524 goto err_recv_cq;
525 }
526
527 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
528
529 init_attr->event_handler = srp_qp_event;
530 init_attr->cap.max_send_wr = m * target->queue_size;
531 init_attr->cap.max_recv_wr = target->queue_size + 1;
532 init_attr->cap.max_recv_sge = 1;
533 init_attr->cap.max_send_sge = 1;
534 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
535 init_attr->qp_type = IB_QPT_RC;
536 init_attr->send_cq = send_cq;
537 init_attr->recv_cq = recv_cq;
538
539 qp = ib_create_qp(dev->pd, init_attr);
540 if (IS_ERR(qp)) {
541 ret = PTR_ERR(qp);
542 goto err_send_cq;
543 }
544
545 ret = srp_init_qp(target, qp);
546 if (ret)
547 goto err_qp;
548
549 if (dev->use_fast_reg) {
550 fr_pool = srp_alloc_fr_pool(target);
551 if (IS_ERR(fr_pool)) {
552 ret = PTR_ERR(fr_pool);
553 shost_printk(KERN_WARNING, target->scsi_host, PFX
554 "FR pool allocation failed (%d)\n", ret);
555 goto err_qp;
556 }
557 if (ch->fr_pool)
558 srp_destroy_fr_pool(ch->fr_pool);
559 ch->fr_pool = fr_pool;
560 } else if (dev->use_fmr) {
561 fmr_pool = srp_alloc_fmr_pool(target);
562 if (IS_ERR(fmr_pool)) {
563 ret = PTR_ERR(fmr_pool);
564 shost_printk(KERN_WARNING, target->scsi_host, PFX
565 "FMR pool allocation failed (%d)\n", ret);
566 goto err_qp;
567 }
568 if (ch->fmr_pool)
569 ib_destroy_fmr_pool(ch->fmr_pool);
570 ch->fmr_pool = fmr_pool;
571 }
572
573 if (ch->qp)
574 srp_destroy_qp(ch);
575 if (ch->recv_cq)
576 ib_destroy_cq(ch->recv_cq);
577 if (ch->send_cq)
578 ib_destroy_cq(ch->send_cq);
579
580 ch->qp = qp;
581 ch->recv_cq = recv_cq;
582 ch->send_cq = send_cq;
583
584 kfree(init_attr);
585 return 0;
586
587 err_qp:
588 ib_destroy_qp(qp);
589
590 err_send_cq:
591 ib_destroy_cq(send_cq);
592
593 err_recv_cq:
594 ib_destroy_cq(recv_cq);
595
596 err:
597 kfree(init_attr);
598 return ret;
599 }
600
601 /*
602 * Note: this function may be called without srp_alloc_iu_bufs() having been
603 * invoked. Hence the ch->[rt]x_ring checks.
604 */
605 static void srp_free_ch_ib(struct srp_target_port *target,
606 struct srp_rdma_ch *ch)
607 {
608 struct srp_device *dev = target->srp_host->srp_dev;
609 int i;
610
611 if (!ch->target)
612 return;
613
614 if (ch->cm_id) {
615 ib_destroy_cm_id(ch->cm_id);
616 ch->cm_id = NULL;
617 }
618
619 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
620 if (!ch->qp)
621 return;
622
623 if (dev->use_fast_reg) {
624 if (ch->fr_pool)
625 srp_destroy_fr_pool(ch->fr_pool);
626 } else if (dev->use_fmr) {
627 if (ch->fmr_pool)
628 ib_destroy_fmr_pool(ch->fmr_pool);
629 }
630 srp_destroy_qp(ch);
631 ib_destroy_cq(ch->send_cq);
632 ib_destroy_cq(ch->recv_cq);
633
634 /*
635 	 * Prevent the SCSI error handler from using this channel after it
636 	 * has been freed; the SCSI error handler may continue trying to
637 	 * perform recovery actions even after scsi_remove_host() has
638 	 * returned.
639 */
640 ch->target = NULL;
641
642 ch->qp = NULL;
643 ch->send_cq = ch->recv_cq = NULL;
644
645 if (ch->rx_ring) {
646 for (i = 0; i < target->queue_size; ++i)
647 srp_free_iu(target->srp_host, ch->rx_ring[i]);
648 kfree(ch->rx_ring);
649 ch->rx_ring = NULL;
650 }
651 if (ch->tx_ring) {
652 for (i = 0; i < target->queue_size; ++i)
653 srp_free_iu(target->srp_host, ch->tx_ring[i]);
654 kfree(ch->tx_ring);
655 ch->tx_ring = NULL;
656 }
657 }
658
659 static void srp_path_rec_completion(int status,
660 struct ib_sa_path_rec *pathrec,
661 void *ch_ptr)
662 {
663 struct srp_rdma_ch *ch = ch_ptr;
664 struct srp_target_port *target = ch->target;
665
666 ch->status = status;
667 if (status)
668 shost_printk(KERN_ERR, target->scsi_host,
669 PFX "Got failed path rec status %d\n", status);
670 else
671 ch->path = *pathrec;
672 complete(&ch->done);
673 }
674
675 static int srp_lookup_path(struct srp_rdma_ch *ch)
676 {
677 struct srp_target_port *target = ch->target;
678 int ret;
679
680 ch->path.numb_path = 1;
681
682 init_completion(&ch->done);
683
684 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
685 target->srp_host->srp_dev->dev,
686 target->srp_host->port,
687 &ch->path,
688 IB_SA_PATH_REC_SERVICE_ID |
689 IB_SA_PATH_REC_DGID |
690 IB_SA_PATH_REC_SGID |
691 IB_SA_PATH_REC_NUMB_PATH |
692 IB_SA_PATH_REC_PKEY,
693 SRP_PATH_REC_TIMEOUT_MS,
694 GFP_KERNEL,
695 srp_path_rec_completion,
696 ch, &ch->path_query);
697 if (ch->path_query_id < 0)
698 return ch->path_query_id;
699
700 ret = wait_for_completion_interruptible(&ch->done);
701 if (ret < 0)
702 return ret;
703
704 if (ch->status < 0)
705 shost_printk(KERN_WARNING, target->scsi_host,
706 PFX "Path record query failed\n");
707
708 return ch->status;
709 }
710
711 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
712 {
713 struct srp_target_port *target = ch->target;
714 struct {
715 struct ib_cm_req_param param;
716 struct srp_login_req priv;
717 } *req = NULL;
718 int status;
719
720 req = kzalloc(sizeof *req, GFP_KERNEL);
721 if (!req)
722 return -ENOMEM;
723
724 req->param.primary_path = &ch->path;
725 req->param.alternate_path = NULL;
726 req->param.service_id = target->service_id;
727 req->param.qp_num = ch->qp->qp_num;
728 req->param.qp_type = ch->qp->qp_type;
729 req->param.private_data = &req->priv;
730 req->param.private_data_len = sizeof req->priv;
731 req->param.flow_control = 1;
732
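	/* The starting PSN is a 24-bit field; pick a random value and mask it. */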
733 get_random_bytes(&req->param.starting_psn, 4);
734 req->param.starting_psn &= 0xffffff;
735
736 /*
737 * Pick some arbitrary defaults here; we could make these
738 * module parameters if anyone cared about setting them.
739 */
740 req->param.responder_resources = 4;
741 req->param.remote_cm_response_timeout = 20;
742 req->param.local_cm_response_timeout = 20;
743 req->param.retry_count = target->tl_retry_count;
744 req->param.rnr_retry_count = 7;
745 req->param.max_cm_retries = 15;
746
747 req->priv.opcode = SRP_LOGIN_REQ;
748 req->priv.tag = 0;
749 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
750 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
751 SRP_BUF_FORMAT_INDIRECT);
752 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
753 SRP_MULTICHAN_SINGLE);
754 /*
755 * In the published SRP specification (draft rev. 16a), the
756 * port identifier format is 8 bytes of ID extension followed
757 * by 8 bytes of GUID. Older drafts put the two halves in the
758 * opposite order, so that the GUID comes first.
759 *
760 * Targets conforming to these obsolete drafts can be
761 * recognized by the I/O Class they report.
762 */
763 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
764 memcpy(req->priv.initiator_port_id,
765 &target->sgid.global.interface_id, 8);
766 memcpy(req->priv.initiator_port_id + 8,
767 &target->initiator_ext, 8);
768 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
769 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
770 } else {
771 memcpy(req->priv.initiator_port_id,
772 &target->initiator_ext, 8);
773 memcpy(req->priv.initiator_port_id + 8,
774 &target->sgid.global.interface_id, 8);
775 memcpy(req->priv.target_port_id, &target->id_ext, 8);
776 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
777 }
778
779 /*
780 * Topspin/Cisco SRP targets will reject our login unless we
781 * zero out the first 8 bytes of our initiator port ID and set
782 * the second 8 bytes to the local node GUID.
783 */
784 if (srp_target_is_topspin(target)) {
785 shost_printk(KERN_DEBUG, target->scsi_host,
786 PFX "Topspin/Cisco initiator port ID workaround "
787 "activated for target GUID %016llx\n",
788 be64_to_cpu(target->ioc_guid));
789 memset(req->priv.initiator_port_id, 0, 8);
790 memcpy(req->priv.initiator_port_id + 8,
791 &target->srp_host->srp_dev->dev->node_guid, 8);
792 }
793
794 status = ib_send_cm_req(ch->cm_id, &req->param);
795
796 kfree(req);
797
798 return status;
799 }
800
801 static bool srp_queue_remove_work(struct srp_target_port *target)
802 {
803 bool changed = false;
804
805 spin_lock_irq(&target->lock);
806 if (target->state != SRP_TARGET_REMOVED) {
807 target->state = SRP_TARGET_REMOVED;
808 changed = true;
809 }
810 spin_unlock_irq(&target->lock);
811
812 if (changed)
813 queue_work(srp_remove_wq, &target->remove_work);
814
815 return changed;
816 }
817
818 static void srp_disconnect_target(struct srp_target_port *target)
819 {
820 struct srp_rdma_ch *ch;
821 int i;
822
823 /* XXX should send SRP_I_LOGOUT request */
824
825 for (i = 0; i < target->ch_count; i++) {
826 ch = &target->ch[i];
827 ch->connected = false;
828 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
829 shost_printk(KERN_DEBUG, target->scsi_host,
830 PFX "Sending CM DREQ failed\n");
831 }
832 }
833 }
834
835 static void srp_free_req_data(struct srp_target_port *target,
836 struct srp_rdma_ch *ch)
837 {
838 struct srp_device *dev = target->srp_host->srp_dev;
839 struct ib_device *ibdev = dev->dev;
840 struct srp_request *req;
841 int i;
842
843 if (!ch->req_ring)
844 return;
845
846 for (i = 0; i < target->req_ring_size; ++i) {
847 req = &ch->req_ring[i];
848 if (dev->use_fast_reg)
849 kfree(req->fr_list);
850 else
851 kfree(req->fmr_list);
852 kfree(req->map_page);
853 if (req->indirect_dma_addr) {
854 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
855 target->indirect_size,
856 DMA_TO_DEVICE);
857 }
858 kfree(req->indirect_desc);
859 }
860
861 kfree(ch->req_ring);
862 ch->req_ring = NULL;
863 }
864
865 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
866 {
867 struct srp_target_port *target = ch->target;
868 struct srp_device *srp_dev = target->srp_host->srp_dev;
869 struct ib_device *ibdev = srp_dev->dev;
870 struct srp_request *req;
871 void *mr_list;
872 dma_addr_t dma_addr;
873 int i, ret = -ENOMEM;
874
875 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
876 GFP_KERNEL);
877 if (!ch->req_ring)
878 goto out;
879
880 for (i = 0; i < target->req_ring_size; ++i) {
881 req = &ch->req_ring[i];
882 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
883 GFP_KERNEL);
884 if (!mr_list)
885 goto out;
886 if (srp_dev->use_fast_reg)
887 req->fr_list = mr_list;
888 else
889 req->fmr_list = mr_list;
890 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
891 sizeof(void *), GFP_KERNEL);
892 if (!req->map_page)
893 goto out;
894 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
895 if (!req->indirect_desc)
896 goto out;
897
898 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
899 target->indirect_size,
900 DMA_TO_DEVICE);
901 if (ib_dma_mapping_error(ibdev, dma_addr))
902 goto out;
903
904 req->indirect_dma_addr = dma_addr;
905 }
906 ret = 0;
907
908 out:
909 return ret;
910 }
911
912 /**
913 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
914 * @shost: SCSI host whose attributes to remove from sysfs.
915 *
916  * Note: Any attributes defined in the host template that did not exist
917  * before this function was invoked will be ignored.
918 */
919 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
920 {
921 struct device_attribute **attr;
922
923 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
924 device_remove_file(&shost->shost_dev, *attr);
925 }
926
927 static void srp_remove_target(struct srp_target_port *target)
928 {
929 struct srp_rdma_ch *ch;
930 int i;
931
932 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
933
934 srp_del_scsi_host_attr(target->scsi_host);
935 srp_rport_get(target->rport);
936 srp_remove_host(target->scsi_host);
937 scsi_remove_host(target->scsi_host);
938 srp_stop_rport_timers(target->rport);
939 srp_disconnect_target(target);
940 for (i = 0; i < target->ch_count; i++) {
941 ch = &target->ch[i];
942 srp_free_ch_ib(target, ch);
943 }
944 cancel_work_sync(&target->tl_err_work);
945 srp_rport_put(target->rport);
946 for (i = 0; i < target->ch_count; i++) {
947 ch = &target->ch[i];
948 srp_free_req_data(target, ch);
949 }
950 kfree(target->ch);
951 target->ch = NULL;
952
953 spin_lock(&target->srp_host->target_lock);
954 list_del(&target->list);
955 spin_unlock(&target->srp_host->target_lock);
956
957 scsi_host_put(target->scsi_host);
958 }
959
960 static void srp_remove_work(struct work_struct *work)
961 {
962 struct srp_target_port *target =
963 container_of(work, struct srp_target_port, remove_work);
964
965 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
966
967 srp_remove_target(target);
968 }
969
970 static void srp_rport_delete(struct srp_rport *rport)
971 {
972 struct srp_target_port *target = rport->lld_data;
973
974 srp_queue_remove_work(target);
975 }
976
977 /**
978 * srp_connected_ch() - number of connected channels
979 * @target: SRP target port.
980 */
981 static int srp_connected_ch(struct srp_target_port *target)
982 {
983 int i, c = 0;
984
985 for (i = 0; i < target->ch_count; i++)
986 c += target->ch[i].connected;
987
988 return c;
989 }
990
991 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
992 {
993 struct srp_target_port *target = ch->target;
994 int ret;
995
996 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
997
998 ret = srp_lookup_path(ch);
999 if (ret)
1000 return ret;
1001
1002 while (1) {
1003 init_completion(&ch->done);
1004 ret = srp_send_req(ch, multich);
1005 if (ret)
1006 return ret;
1007 ret = wait_for_completion_interruptible(&ch->done);
1008 if (ret < 0)
1009 return ret;
1010
1011 /*
1012 * The CM event handling code will set status to
1013 * SRP_PORT_REDIRECT if we get a port redirect REJ
1014 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1015 * redirect REJ back.
1016 */
1017 switch (ch->status) {
1018 case 0:
1019 ch->connected = true;
1020 return 0;
1021
1022 case SRP_PORT_REDIRECT:
1023 ret = srp_lookup_path(ch);
1024 if (ret)
1025 return ret;
1026 break;
1027
1028 case SRP_DLID_REDIRECT:
1029 break;
1030
1031 case SRP_STALE_CONN:
1032 shost_printk(KERN_ERR, target->scsi_host, PFX
1033 "giving up on stale connection\n");
1034 ch->status = -ECONNRESET;
1035 return ch->status;
1036
1037 default:
1038 return ch->status;
1039 }
1040 }
1041 }
1042
1043 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1044 {
1045 struct ib_send_wr *bad_wr;
1046 struct ib_send_wr wr = {
1047 .opcode = IB_WR_LOCAL_INV,
1048 .wr_id = LOCAL_INV_WR_ID_MASK,
1049 .next = NULL,
1050 .num_sge = 0,
1051 .send_flags = 0,
1052 .ex.invalidate_rkey = rkey,
1053 };
1054
1055 return ib_post_send(ch->qp, &wr, &bad_wr);
1056 }
1057
1058 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1059 struct srp_rdma_ch *ch,
1060 struct srp_request *req)
1061 {
1062 struct srp_target_port *target = ch->target;
1063 struct srp_device *dev = target->srp_host->srp_dev;
1064 struct ib_device *ibdev = dev->dev;
1065 int i, res;
1066
1067 if (!scsi_sglist(scmnd) ||
1068 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1069 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1070 return;
1071
1072 if (dev->use_fast_reg) {
1073 struct srp_fr_desc **pfr;
1074
1075 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1076 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1077 if (res < 0) {
1078 shost_printk(KERN_ERR, target->scsi_host, PFX
1079 "Queueing INV WR for rkey %#x failed (%d)\n",
1080 (*pfr)->mr->rkey, res);
1081 queue_work(system_long_wq,
1082 &target->tl_err_work);
1083 }
1084 }
1085 if (req->nmdesc)
1086 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1087 req->nmdesc);
1088 } else if (dev->use_fmr) {
1089 struct ib_pool_fmr **pfmr;
1090
1091 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1092 ib_fmr_pool_unmap(*pfmr);
1093 }
1094
1095 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1096 scmnd->sc_data_direction);
1097 }
1098
1099 /**
1100 * srp_claim_req - Take ownership of the scmnd associated with a request.
1101 * @ch: SRP RDMA channel.
1102 * @req: SRP request.
1103 * @sdev: If not NULL, only take ownership for this SCSI device.
1104 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1105 * ownership of @req->scmnd if it equals @scmnd.
1106 *
1107 * Return value:
1108 * Either NULL or a pointer to the SCSI command the caller became owner of.
1109 */
1110 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1111 struct srp_request *req,
1112 struct scsi_device *sdev,
1113 struct scsi_cmnd *scmnd)
1114 {
1115 unsigned long flags;
1116
1117 spin_lock_irqsave(&ch->lock, flags);
1118 if (req->scmnd &&
1119 (!sdev || req->scmnd->device == sdev) &&
1120 (!scmnd || req->scmnd == scmnd)) {
1121 scmnd = req->scmnd;
1122 req->scmnd = NULL;
1123 } else {
1124 scmnd = NULL;
1125 }
1126 spin_unlock_irqrestore(&ch->lock, flags);
1127
1128 return scmnd;
1129 }
1130
1131 /**
1132 * srp_free_req() - Unmap data and add request to the free request list.
1133 * @ch: SRP RDMA channel.
1134 * @req: Request to be freed.
1135 * @scmnd: SCSI command associated with @req.
1136 * @req_lim_delta: Amount to be added to @target->req_lim.
1137 */
1138 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1139 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1140 {
1141 unsigned long flags;
1142
1143 srp_unmap_data(scmnd, ch, req);
1144
1145 spin_lock_irqsave(&ch->lock, flags);
1146 ch->req_lim += req_lim_delta;
1147 spin_unlock_irqrestore(&ch->lock, flags);
1148 }
1149
1150 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1151 struct scsi_device *sdev, int result)
1152 {
1153 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1154
1155 if (scmnd) {
1156 srp_free_req(ch, req, scmnd, 0);
1157 scmnd->result = result;
1158 scmnd->scsi_done(scmnd);
1159 }
1160 }
1161
1162 static void srp_terminate_io(struct srp_rport *rport)
1163 {
1164 struct srp_target_port *target = rport->lld_data;
1165 struct srp_rdma_ch *ch;
1166 struct Scsi_Host *shost = target->scsi_host;
1167 struct scsi_device *sdev;
1168 int i, j;
1169
1170 /*
1171 * Invoking srp_terminate_io() while srp_queuecommand() is running
1172 * is not safe. Hence the warning statement below.
1173 */
1174 shost_for_each_device(sdev, shost)
1175 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1176
1177 for (i = 0; i < target->ch_count; i++) {
1178 ch = &target->ch[i];
1179
1180 for (j = 0; j < target->req_ring_size; ++j) {
1181 struct srp_request *req = &ch->req_ring[j];
1182
1183 srp_finish_req(ch, req, NULL,
1184 DID_TRANSPORT_FAILFAST << 16);
1185 }
1186 }
1187 }
1188
1189 /*
1190 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1191 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1192 * srp_reset_device() or srp_reset_host() calls will occur while this function
1193  * is in progress. One way to ensure this is not to call this function
1194  * directly but to call srp_reconnect_rport() instead, since that function
1195  * serializes calls of this function via rport->mutex and also blocks
1196  * srp_queuecommand() calls before invoking this function.
1197 */
1198 static int srp_rport_reconnect(struct srp_rport *rport)
1199 {
1200 struct srp_target_port *target = rport->lld_data;
1201 struct srp_rdma_ch *ch;
1202 int i, j, ret = 0;
1203 bool multich = false;
1204
1205 srp_disconnect_target(target);
1206
1207 if (target->state == SRP_TARGET_SCANNING)
1208 return -ENODEV;
1209
1210 /*
1211 * Now get a new local CM ID so that we avoid confusing the target in
1212 * case things are really fouled up. Doing so also ensures that all CM
1213 * callbacks will have finished before a new QP is allocated.
1214 */
1215 for (i = 0; i < target->ch_count; i++) {
1216 ch = &target->ch[i];
1217 ret += srp_new_cm_id(ch);
1218 }
1219 for (i = 0; i < target->ch_count; i++) {
1220 ch = &target->ch[i];
1221 for (j = 0; j < target->req_ring_size; ++j) {
1222 struct srp_request *req = &ch->req_ring[j];
1223
1224 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1225 }
1226 }
1227 for (i = 0; i < target->ch_count; i++) {
1228 ch = &target->ch[i];
1229 /*
1230 * Whether or not creating a new CM ID succeeded, create a new
1231 * QP. This guarantees that all completion callback function
1232 * invocations have finished before request resetting starts.
1233 */
1234 ret += srp_create_ch_ib(ch);
1235
1236 INIT_LIST_HEAD(&ch->free_tx);
1237 for (j = 0; j < target->queue_size; ++j)
1238 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1239 }
1240
1241 target->qp_in_error = false;
1242
1243 for (i = 0; i < target->ch_count; i++) {
1244 ch = &target->ch[i];
1245 if (ret)
1246 break;
1247 ret = srp_connect_ch(ch, multich);
1248 multich = true;
1249 }
1250
1251 if (ret == 0)
1252 shost_printk(KERN_INFO, target->scsi_host,
1253 PFX "reconnect succeeded\n");
1254
1255 return ret;
1256 }
1257
1258 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1259 unsigned int dma_len, u32 rkey)
1260 {
1261 struct srp_direct_buf *desc = state->desc;
1262
1263 WARN_ON_ONCE(!dma_len);
1264
1265 desc->va = cpu_to_be64(dma_addr);
1266 desc->key = cpu_to_be32(rkey);
1267 desc->len = cpu_to_be32(dma_len);
1268
1269 state->total_len += dma_len;
1270 state->desc++;
1271 state->ndesc++;
1272 }
1273
1274 static int srp_map_finish_fmr(struct srp_map_state *state,
1275 struct srp_rdma_ch *ch)
1276 {
1277 struct srp_target_port *target = ch->target;
1278 struct srp_device *dev = target->srp_host->srp_dev;
1279 struct ib_pool_fmr *fmr;
1280 u64 io_addr = 0;
1281
1282 if (state->fmr.next >= state->fmr.end)
1283 return -ENOMEM;
1284
1285 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1286 state->npages, io_addr);
1287 if (IS_ERR(fmr))
1288 return PTR_ERR(fmr);
1289
1290 *state->fmr.next++ = fmr;
1291 state->nmdesc++;
1292
1293 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1294 state->dma_len, fmr->fmr->rkey);
1295
1296 return 0;
1297 }
1298
1299 static int srp_map_finish_fr(struct srp_map_state *state,
1300 struct srp_rdma_ch *ch)
1301 {
1302 struct srp_target_port *target = ch->target;
1303 struct srp_device *dev = target->srp_host->srp_dev;
1304 struct ib_send_wr *bad_wr;
1305 struct ib_send_wr wr;
1306 struct srp_fr_desc *desc;
1307 u32 rkey;
1308
1309 if (state->fr.next >= state->fr.end)
1310 return -ENOMEM;
1311
1312 desc = srp_fr_pool_get(ch->fr_pool);
1313 if (!desc)
1314 return -ENOMEM;
1315
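	/*
	 * Bump the key portion of the rkey so that requests still using the
	 * previous rkey of this MR are detected as stale.
	 */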
1316 rkey = ib_inc_rkey(desc->mr->rkey);
1317 ib_update_fast_reg_key(desc->mr, rkey);
1318
1319 memcpy(desc->frpl->page_list, state->pages,
1320 sizeof(state->pages[0]) * state->npages);
1321
1322 memset(&wr, 0, sizeof(wr));
1323 wr.opcode = IB_WR_FAST_REG_MR;
1324 wr.wr_id = FAST_REG_WR_ID_MASK;
1325 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1326 wr.wr.fast_reg.page_list = desc->frpl;
1327 wr.wr.fast_reg.page_list_len = state->npages;
1328 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1329 wr.wr.fast_reg.length = state->dma_len;
1330 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1331 IB_ACCESS_REMOTE_READ |
1332 IB_ACCESS_REMOTE_WRITE);
1333 wr.wr.fast_reg.rkey = desc->mr->lkey;
1334
1335 *state->fr.next++ = desc;
1336 state->nmdesc++;
1337
1338 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1339 desc->mr->rkey);
1340
1341 return ib_post_send(ch->qp, &wr, &bad_wr);
1342 }
1343
1344 static int srp_finish_mapping(struct srp_map_state *state,
1345 struct srp_rdma_ch *ch)
1346 {
1347 struct srp_target_port *target = ch->target;
1348 struct srp_device *dev = target->srp_host->srp_dev;
1349 int ret = 0;
1350
1351 WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
1352
1353 if (state->npages == 0)
1354 return 0;
1355
1356 if (state->npages == 1 && !register_always)
1357 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1358 target->rkey);
1359 else
1360 ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
1361 srp_map_finish_fmr(state, ch);
1362
1363 if (ret == 0) {
1364 state->npages = 0;
1365 state->dma_len = 0;
1366 }
1367
1368 return ret;
1369 }
1370
1371 static int srp_map_sg_entry(struct srp_map_state *state,
1372 struct srp_rdma_ch *ch,
1373 struct scatterlist *sg, int sg_index)
1374 {
1375 struct srp_target_port *target = ch->target;
1376 struct srp_device *dev = target->srp_host->srp_dev;
1377 struct ib_device *ibdev = dev->dev;
1378 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1379 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1380 unsigned int len = 0;
1381 int ret;
1382
1383 WARN_ON_ONCE(!dma_len);
1384
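	/*
	 * Split this S/G entry into chunks of at most mr_page_size bytes.
	 * Close out the current mapping whenever the page list is full or the
	 * chunk does not start on a page boundary, since merging is only
	 * possible at page boundaries.
	 */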
1385 while (dma_len) {
1386 unsigned offset = dma_addr & ~dev->mr_page_mask;
1387 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1388 ret = srp_finish_mapping(state, ch);
1389 if (ret)
1390 return ret;
1391 }
1392
1393 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1394
1395 if (!state->npages)
1396 state->base_dma_addr = dma_addr;
1397 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1398 state->dma_len += len;
1399 dma_addr += len;
1400 dma_len -= len;
1401 }
1402
1403 /*
1404 * If the last entry of the MR wasn't a full page, then we need to
1405 * close it out and start a new one -- we can only merge at page
1406 	 * boundaries.
1407 */
1408 ret = 0;
1409 if (len != dev->mr_page_size)
1410 ret = srp_finish_mapping(state, ch);
1411 return ret;
1412 }
1413
1414 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1415 struct srp_request *req, struct scatterlist *scat,
1416 int count)
1417 {
1418 struct srp_target_port *target = ch->target;
1419 struct srp_device *dev = target->srp_host->srp_dev;
1420 struct scatterlist *sg;
1421 int i, ret;
1422
1423 state->desc = req->indirect_desc;
1424 state->pages = req->map_page;
1425 if (dev->use_fast_reg) {
1426 state->fr.next = req->fr_list;
1427 state->fr.end = req->fr_list + target->cmd_sg_cnt;
1428 } else if (dev->use_fmr) {
1429 state->fmr.next = req->fmr_list;
1430 state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
1431 }
1432
1433 if (dev->use_fast_reg || dev->use_fmr) {
1434 for_each_sg(scat, sg, count, i) {
1435 ret = srp_map_sg_entry(state, ch, sg, i);
1436 if (ret)
1437 goto out;
1438 }
1439 ret = srp_finish_mapping(state, ch);
1440 if (ret)
1441 goto out;
1442 } else {
1443 for_each_sg(scat, sg, count, i) {
1444 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1445 ib_sg_dma_len(dev->dev, sg), target->rkey);
1446 }
1447 }
1448
1449 req->nmdesc = state->nmdesc;
1450 ret = 0;
1451
1452 out:
1453 return ret;
1454 }
1455
1456 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1457 struct srp_request *req)
1458 {
1459 struct srp_target_port *target = ch->target;
1460 struct scatterlist *scat;
1461 struct srp_cmd *cmd = req->cmd->buf;
1462 int len, nents, count;
1463 struct srp_device *dev;
1464 struct ib_device *ibdev;
1465 struct srp_map_state state;
1466 struct srp_indirect_buf *indirect_hdr;
1467 u32 table_len;
1468 u8 fmt;
1469
1470 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1471 return sizeof (struct srp_cmd);
1472
1473 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1474 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1475 shost_printk(KERN_WARNING, target->scsi_host,
1476 PFX "Unhandled data direction %d\n",
1477 scmnd->sc_data_direction);
1478 return -EINVAL;
1479 }
1480
1481 nents = scsi_sg_count(scmnd);
1482 scat = scsi_sglist(scmnd);
1483
1484 dev = target->srp_host->srp_dev;
1485 ibdev = dev->dev;
1486
1487 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1488 if (unlikely(count == 0))
1489 return -EIO;
1490
1491 fmt = SRP_DATA_DESC_DIRECT;
1492 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1493
1494 if (count == 1 && !register_always) {
1495 /*
1496 * The midlayer only generated a single gather/scatter
1497 * entry, or DMA mapping coalesced everything to a
1498 * single entry. So a direct descriptor along with
1499 * the DMA MR suffices.
1500 */
1501 struct srp_direct_buf *buf = (void *) cmd->add_data;
1502
1503 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1504 buf->key = cpu_to_be32(target->rkey);
1505 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1506
1507 req->nmdesc = 0;
1508 goto map_complete;
1509 }
1510
1511 /*
1512 * We have more than one scatter/gather entry, so build our indirect
1513 * descriptor table, trying to merge as many entries as we can.
1514 */
1515 indirect_hdr = (void *) cmd->add_data;
1516
1517 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1518 target->indirect_size, DMA_TO_DEVICE);
1519
1520 memset(&state, 0, sizeof(state));
1521 srp_map_sg(&state, ch, req, scat, count);
1522
1523 /* We've mapped the request, now pull as much of the indirect
1524 * descriptor table as we can into the command buffer. If this
1525 * target is not using an external indirect table, we are
1526 * guaranteed to fit into the command, as the SCSI layer won't
1527 * give us more S/G entries than we allow.
1528 */
1529 if (state.ndesc == 1) {
1530 /*
1531 * Memory registration collapsed the sg-list into one entry,
1532 * so use a direct descriptor.
1533 */
1534 struct srp_direct_buf *buf = (void *) cmd->add_data;
1535
1536 *buf = req->indirect_desc[0];
1537 goto map_complete;
1538 }
1539
1540 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1541 !target->allow_ext_sg)) {
1542 shost_printk(KERN_ERR, target->scsi_host,
1543 "Could not fit S/G list into SRP_CMD\n");
1544 return -EIO;
1545 }
1546
1547 count = min(state.ndesc, target->cmd_sg_cnt);
1548 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1549
1550 fmt = SRP_DATA_DESC_INDIRECT;
1551 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1552 len += count * sizeof (struct srp_direct_buf);
1553
1554 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1555 count * sizeof (struct srp_direct_buf));
1556
1557 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1558 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1559 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1560 indirect_hdr->len = cpu_to_be32(state.total_len);
1561
1562 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1563 cmd->data_out_desc_cnt = count;
1564 else
1565 cmd->data_in_desc_cnt = count;
1566
1567 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1568 DMA_TO_DEVICE);
1569
1570 map_complete:
1571 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1572 cmd->buf_fmt = fmt << 4;
1573 else
1574 cmd->buf_fmt = fmt;
1575
1576 return len;
1577 }
1578
1579 /*
1580 * Return an IU and possible credit to the free pool
1581 */
1582 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1583 enum srp_iu_type iu_type)
1584 {
1585 unsigned long flags;
1586
1587 spin_lock_irqsave(&ch->lock, flags);
1588 list_add(&iu->list, &ch->free_tx);
1589 if (iu_type != SRP_IU_RSP)
1590 ++ch->req_lim;
1591 spin_unlock_irqrestore(&ch->lock, flags);
1592 }
1593
1594 /*
1595 * Must be called with ch->lock held to protect req_lim and free_tx.
1596 * If IU is not sent, it must be returned using srp_put_tx_iu().
1597 *
1598 * Note:
1599 * An upper limit for the number of allocated information units for each
1600 * request type is:
1601 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1602 * more than Scsi_Host.can_queue requests.
1603 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1604 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1605 * one unanswered SRP request to an initiator.
1606 */
1607 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1608 enum srp_iu_type iu_type)
1609 {
1610 struct srp_target_port *target = ch->target;
1611 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1612 struct srp_iu *iu;
1613
1614 srp_send_completion(ch->send_cq, ch);
1615
1616 if (list_empty(&ch->free_tx))
1617 return NULL;
1618
1619 /* Initiator responses to target requests do not consume credits */
1620 if (iu_type != SRP_IU_RSP) {
1621 if (ch->req_lim <= rsv) {
1622 ++target->zero_req_lim;
1623 return NULL;
1624 }
1625
1626 --ch->req_lim;
1627 }
1628
1629 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1630 list_del(&iu->list);
1631 return iu;
1632 }
1633
1634 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1635 {
1636 struct srp_target_port *target = ch->target;
1637 struct ib_sge list;
1638 struct ib_send_wr wr, *bad_wr;
1639
1640 list.addr = iu->dma;
1641 list.length = len;
1642 list.lkey = target->lkey;
1643
1644 wr.next = NULL;
1645 wr.wr_id = (uintptr_t) iu;
1646 wr.sg_list = &list;
1647 wr.num_sge = 1;
1648 wr.opcode = IB_WR_SEND;
1649 wr.send_flags = IB_SEND_SIGNALED;
1650
1651 return ib_post_send(ch->qp, &wr, &bad_wr);
1652 }
1653
1654 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1655 {
1656 struct srp_target_port *target = ch->target;
1657 struct ib_recv_wr wr, *bad_wr;
1658 struct ib_sge list;
1659
1660 list.addr = iu->dma;
1661 list.length = iu->size;
1662 list.lkey = target->lkey;
1663
1664 wr.next = NULL;
1665 wr.wr_id = (uintptr_t) iu;
1666 wr.sg_list = &list;
1667 wr.num_sge = 1;
1668
1669 return ib_post_recv(ch->qp, &wr, &bad_wr);
1670 }
1671
1672 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1673 {
1674 struct srp_target_port *target = ch->target;
1675 struct srp_request *req;
1676 struct scsi_cmnd *scmnd;
1677 unsigned long flags;
1678
1679 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1680 spin_lock_irqsave(&ch->lock, flags);
1681 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1682 spin_unlock_irqrestore(&ch->lock, flags);
1683
1684 ch->tsk_mgmt_status = -1;
1685 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1686 ch->tsk_mgmt_status = rsp->data[3];
1687 complete(&ch->tsk_mgmt_done);
1688 } else {
1689 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1690 if (scmnd) {
1691 req = (void *)scmnd->host_scribble;
1692 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1693 }
1694 if (!scmnd) {
1695 shost_printk(KERN_ERR, target->scsi_host,
1696 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1697 rsp->tag, ch - target->ch, ch->qp->qp_num);
1698
1699 spin_lock_irqsave(&ch->lock, flags);
1700 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1701 spin_unlock_irqrestore(&ch->lock, flags);
1702
1703 return;
1704 }
1705 scmnd->result = rsp->status;
1706
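		/* In an SRP_RSP IU the sense data follows the response data. */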
1707 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1708 memcpy(scmnd->sense_buffer, rsp->data +
1709 be32_to_cpu(rsp->resp_data_len),
1710 min_t(int, be32_to_cpu(rsp->sense_data_len),
1711 SCSI_SENSE_BUFFERSIZE));
1712 }
1713
1714 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1715 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1716 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1717 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1718 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1719 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1720 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1721 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1722
1723 srp_free_req(ch, req, scmnd,
1724 be32_to_cpu(rsp->req_lim_delta));
1725
1726 scmnd->host_scribble = NULL;
1727 scmnd->scsi_done(scmnd);
1728 }
1729 }
1730
1731 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1732 void *rsp, int len)
1733 {
1734 struct srp_target_port *target = ch->target;
1735 struct ib_device *dev = target->srp_host->srp_dev->dev;
1736 unsigned long flags;
1737 struct srp_iu *iu;
1738 int err;
1739
1740 spin_lock_irqsave(&ch->lock, flags);
1741 ch->req_lim += req_delta;
1742 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1743 spin_unlock_irqrestore(&ch->lock, flags);
1744
1745 if (!iu) {
1746 shost_printk(KERN_ERR, target->scsi_host, PFX
1747 "no IU available to send response\n");
1748 return 1;
1749 }
1750
1751 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1752 memcpy(iu->buf, rsp, len);
1753 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1754
1755 err = srp_post_send(ch, iu, len);
1756 if (err) {
1757 shost_printk(KERN_ERR, target->scsi_host, PFX
1758 "unable to post response: %d\n", err);
1759 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1760 }
1761
1762 return err;
1763 }
1764
1765 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1766 struct srp_cred_req *req)
1767 {
1768 struct srp_cred_rsp rsp = {
1769 .opcode = SRP_CRED_RSP,
1770 .tag = req->tag,
1771 };
1772 s32 delta = be32_to_cpu(req->req_lim_delta);
1773
1774 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1775 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1776 "problems processing SRP_CRED_REQ\n");
1777 }
1778
1779 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1780 struct srp_aer_req *req)
1781 {
1782 struct srp_target_port *target = ch->target;
1783 struct srp_aer_rsp rsp = {
1784 .opcode = SRP_AER_RSP,
1785 .tag = req->tag,
1786 };
1787 s32 delta = be32_to_cpu(req->req_lim_delta);
1788
1789 shost_printk(KERN_ERR, target->scsi_host, PFX
1790 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1791
1792 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1793 shost_printk(KERN_ERR, target->scsi_host, PFX
1794 "problems processing SRP_AER_REQ\n");
1795 }
1796
1797 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1798 {
1799 struct srp_target_port *target = ch->target;
1800 struct ib_device *dev = target->srp_host->srp_dev->dev;
1801 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1802 int res;
1803 u8 opcode;
1804
1805 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1806 DMA_FROM_DEVICE);
1807
1808 opcode = *(u8 *) iu->buf;
1809
1810 if (0) {
1811 shost_printk(KERN_ERR, target->scsi_host,
1812 PFX "recv completion, opcode 0x%02x\n", opcode);
1813 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1814 iu->buf, wc->byte_len, true);
1815 }
1816
1817 switch (opcode) {
1818 case SRP_RSP:
1819 srp_process_rsp(ch, iu->buf);
1820 break;
1821
1822 case SRP_CRED_REQ:
1823 srp_process_cred_req(ch, iu->buf);
1824 break;
1825
1826 case SRP_AER_REQ:
1827 srp_process_aer_req(ch, iu->buf);
1828 break;
1829
1830 case SRP_T_LOGOUT:
1831 /* XXX Handle target logout */
1832 shost_printk(KERN_WARNING, target->scsi_host,
1833 PFX "Got target logout request\n");
1834 break;
1835
1836 default:
1837 shost_printk(KERN_WARNING, target->scsi_host,
1838 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1839 break;
1840 }
1841
1842 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1843 DMA_FROM_DEVICE);
1844
1845 res = srp_post_recv(ch, iu);
1846 if (res != 0)
1847 shost_printk(KERN_ERR, target->scsi_host,
1848 PFX "Recv failed with error code %d\n", res);
1849 }
1850
1851 /**
1852 * srp_tl_err_work() - handle a transport layer error
1853 * @work: Work structure embedded in an SRP target port.
1854 *
1855 * Note: This function may get invoked before the rport has been created,
1856 * hence the target->rport test.
1857 */
1858 static void srp_tl_err_work(struct work_struct *work)
1859 {
1860 struct srp_target_port *target;
1861
1862 target = container_of(work, struct srp_target_port, tl_err_work);
1863 if (target->rport)
1864 srp_start_tl_fail_timers(target->rport);
1865 }
1866
1867 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1868 bool send_err, struct srp_rdma_ch *ch)
1869 {
1870 struct srp_target_port *target = ch->target;
1871
1872 if (wr_id == SRP_LAST_WR_ID) {
1873 complete(&ch->done);
1874 return;
1875 }
1876
1877 if (ch->connected && !target->qp_in_error) {
1878 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1879 shost_printk(KERN_ERR, target->scsi_host, PFX
1880 "LOCAL_INV failed with status %s (%d)\n",
1881 ib_wc_status_msg(wc_status), wc_status);
1882 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1883 shost_printk(KERN_ERR, target->scsi_host, PFX
1884 "FAST_REG_MR failed status %s (%d)\n",
1885 ib_wc_status_msg(wc_status), wc_status);
1886 } else {
1887 shost_printk(KERN_ERR, target->scsi_host,
1888 PFX "failed %s status %s (%d) for iu %p\n",
1889 send_err ? "send" : "receive",
1890 ib_wc_status_msg(wc_status), wc_status,
1891 (void *)(uintptr_t)wr_id);
1892 }
1893 queue_work(system_long_wq, &target->tl_err_work);
1894 }
1895 target->qp_in_error = true;
1896 }
1897
1898 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1899 {
1900 struct srp_rdma_ch *ch = ch_ptr;
1901 struct ib_wc wc;
1902
1903 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1904 while (ib_poll_cq(cq, 1, &wc) > 0) {
1905 if (likely(wc.status == IB_WC_SUCCESS)) {
1906 srp_handle_recv(ch, &wc);
1907 } else {
1908 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1909 }
1910 }
1911 }
1912
1913 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1914 {
1915 struct srp_rdma_ch *ch = ch_ptr;
1916 struct ib_wc wc;
1917 struct srp_iu *iu;
1918
1919 while (ib_poll_cq(cq, 1, &wc) > 0) {
1920 if (likely(wc.status == IB_WC_SUCCESS)) {
1921 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1922 list_add(&iu->list, &ch->free_tx);
1923 } else {
1924 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1925 }
1926 }
1927 }
1928
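/*
 * srp_queuecommand() - queue a SCSI command
 *
 * Build an SRP_CMD information unit for @scmnd, map its data buffer and
 * post the IU on the RDMA channel selected via the block layer tag.
 * Returns 0 on success and also if the command has been completed with an
 * error; returns SCSI_MLQUEUE_HOST_BUSY to ask the SCSI mid-layer to retry
 * the command later.
 */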
1929 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1930 {
1931 struct srp_target_port *target = host_to_target(shost);
1932 struct srp_rport *rport = target->rport;
1933 struct srp_rdma_ch *ch;
1934 struct srp_request *req;
1935 struct srp_iu *iu;
1936 struct srp_cmd *cmd;
1937 struct ib_device *dev;
1938 unsigned long flags;
1939 u32 tag;
1940 u16 idx;
1941 int len, ret;
1942 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1943
1944 /*
1945 * The SCSI EH thread is the only context from which srp_queuecommand()
1946 * can get invoked for blocked devices (SDEV_BLOCK /
1947 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1948 * locking the rport mutex if invoked from inside the SCSI EH.
1949 */
1950 if (in_scsi_eh)
1951 mutex_lock(&rport->mutex);
1952
1953 scmnd->result = srp_chkready(target->rport);
1954 if (unlikely(scmnd->result))
1955 goto err;
1956
1957 WARN_ON_ONCE(scmnd->request->tag < 0);
1958 tag = blk_mq_unique_tag(scmnd->request);
1959 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1960 idx = blk_mq_unique_tag_to_tag(tag);
1961 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
1962 dev_name(&shost->shost_gendev), tag, idx,
1963 target->req_ring_size);
1964
1965 spin_lock_irqsave(&ch->lock, flags);
1966 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
1967 spin_unlock_irqrestore(&ch->lock, flags);
1968
1969 if (!iu)
1970 goto err;
1971
1972 req = &ch->req_ring[idx];
1973 dev = target->srp_host->srp_dev->dev;
1974 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1975 DMA_TO_DEVICE);
1976
1977 scmnd->host_scribble = (void *) req;
1978
1979 cmd = iu->buf;
1980 memset(cmd, 0, sizeof *cmd);
1981
1982 cmd->opcode = SRP_CMD;
1983 int_to_scsilun(scmnd->device->lun, &cmd->lun);
1984 cmd->tag = tag;
1985 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1986
1987 req->scmnd = scmnd;
1988 req->cmd = iu;
1989
1990 len = srp_map_data(scmnd, ch, req);
1991 if (len < 0) {
1992 shost_printk(KERN_ERR, target->scsi_host,
1993 PFX "Failed to map data (%d)\n", len);
1994 /*
1995 * If we ran out of memory descriptors (-ENOMEM) because an
1996 * application is queuing many requests with more than
1997 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
1998 * to reduce queue depth temporarily.
1999 */
2000 scmnd->result = len == -ENOMEM ?
2001 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2002 goto err_iu;
2003 }
2004
2005 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2006 DMA_TO_DEVICE);
2007
2008 if (srp_post_send(ch, iu, len)) {
2009 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2010 goto err_unmap;
2011 }
2012
2013 ret = 0;
2014
2015 unlock_rport:
2016 if (in_scsi_eh)
2017 mutex_unlock(&rport->mutex);
2018
2019 return ret;
2020
2021 err_unmap:
2022 srp_unmap_data(scmnd, ch, req);
2023
2024 err_iu:
2025 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2026
2027 /*
2028 * Prevent the loops that iterate over the request ring from
2029 * encountering a dangling SCSI command pointer.
2030 */
2031 req->scmnd = NULL;
2032
2033 err:
2034 if (scmnd->result) {
2035 scmnd->scsi_done(scmnd);
2036 ret = 0;
2037 } else {
2038 ret = SCSI_MLQUEUE_HOST_BUSY;
2039 }
2040
2041 goto unlock_rport;
2042 }
2043
2044 /*
2045 * Note: the resources allocated in this function are freed in
2046 * srp_free_ch_ib().
2047 */
2048 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2049 {
2050 struct srp_target_port *target = ch->target;
2051 int i;
2052
2053 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2054 GFP_KERNEL);
2055 if (!ch->rx_ring)
2056 goto err_no_ring;
2057 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2058 GFP_KERNEL);
2059 if (!ch->tx_ring)
2060 goto err_no_ring;
2061
2062 for (i = 0; i < target->queue_size; ++i) {
2063 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2064 ch->max_ti_iu_len,
2065 GFP_KERNEL, DMA_FROM_DEVICE);
2066 if (!ch->rx_ring[i])
2067 goto err;
2068 }
2069
2070 for (i = 0; i < target->queue_size; ++i) {
2071 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2072 target->max_iu_len,
2073 GFP_KERNEL, DMA_TO_DEVICE);
2074 if (!ch->tx_ring[i])
2075 goto err;
2076
2077 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2078 }
2079
2080 return 0;
2081
2082 err:
2083 for (i = 0; i < target->queue_size; ++i) {
2084 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2085 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2086 }
2087
2088
2089 err_no_ring:
2090 kfree(ch->tx_ring);
2091 ch->tx_ring = NULL;
2092 kfree(ch->rx_ring);
2093 ch->rx_ring = NULL;
2094
2095 return -ENOMEM;
2096 }
2097
2098 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2099 {
2100 uint64_t T_tr_ns, max_compl_time_ms;
2101 uint32_t rq_tmo_jiffies;
2102
2103 /*
2104 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2105 * table 91), both the QP timeout and the retry count have to be set
2106 * for RC QP's during the RTR to RTS transition.
2107 */
2108 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2109 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2110
2111 /*
2112 * Set target->rq_tmo_jiffies to one second more than the largest time
2113 * it can take before an error completion is generated. See also
2114 * C9-140..142 in the IBTA spec for more information about how to
2115 * convert the QP Local ACK Timeout value to nanoseconds.
2116 */
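/*
 * Worked example with illustrative values: for a local ACK timeout of 19
 * and retry_cnt = 7, T_tr = 4096 * 2^19 ns ~= 2.1 s, the largest
 * completion time is 7 * 4 * 2.1 s ~= 60 s, and hence rq_tmo_jiffies
 * corresponds to about 61 seconds.
 */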
2117 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2118 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2119 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2120 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2121
2122 return rq_tmo_jiffies;
2123 }
2124
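/*
 * Process a CM REP: parse the SRP_LOGIN_RSP, cap the SCSI host queue depth
 * to the target's request limit, allocate the IU rings if necessary,
 * transition the QP through RTR and RTS, post the receive buffers and send
 * the CM RTU. ch->status is set to zero on success and to a negative error
 * code on failure.
 */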
2125 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2126 const struct srp_login_rsp *lrsp,
2127 struct srp_rdma_ch *ch)
2128 {
2129 struct srp_target_port *target = ch->target;
2130 struct ib_qp_attr *qp_attr = NULL;
2131 int attr_mask = 0;
2132 int ret;
2133 int i;
2134
2135 if (lrsp->opcode == SRP_LOGIN_RSP) {
2136 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2137 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2138
2139 /*
2140 * Reserve credits for task management so we don't
2141 * bounce requests back to the SCSI mid-layer.
2142 */
2143 target->scsi_host->can_queue
2144 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2145 target->scsi_host->can_queue);
2146 target->scsi_host->cmd_per_lun
2147 = min_t(int, target->scsi_host->can_queue,
2148 target->scsi_host->cmd_per_lun);
2149 } else {
2150 shost_printk(KERN_WARNING, target->scsi_host,
2151 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2152 ret = -ECONNRESET;
2153 goto error;
2154 }
2155
2156 if (!ch->rx_ring) {
2157 ret = srp_alloc_iu_bufs(ch);
2158 if (ret)
2159 goto error;
2160 }
2161
2162 ret = -ENOMEM;
2163 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2164 if (!qp_attr)
2165 goto error;
2166
2167 qp_attr->qp_state = IB_QPS_RTR;
2168 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2169 if (ret)
2170 goto error_free;
2171
2172 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2173 if (ret)
2174 goto error_free;
2175
2176 for (i = 0; i < target->queue_size; i++) {
2177 struct srp_iu *iu = ch->rx_ring[i];
2178
2179 ret = srp_post_recv(ch, iu);
2180 if (ret)
2181 goto error_free;
2182 }
2183
2184 qp_attr->qp_state = IB_QPS_RTS;
2185 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2186 if (ret)
2187 goto error_free;
2188
2189 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2190
2191 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2192 if (ret)
2193 goto error_free;
2194
2195 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2196
2197 error_free:
2198 kfree(qp_attr);
2199
2200 error:
2201 ch->status = ret;
2202 }
2203
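/*
 * Translate a CM REJ into ch->status: CM and port redirects yield
 * SRP_DLID_REDIRECT or SRP_PORT_REDIRECT, a stale connection yields
 * SRP_STALE_CONN and all other reject reasons yield -ECONNRESET.
 */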
2204 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2205 struct ib_cm_event *event,
2206 struct srp_rdma_ch *ch)
2207 {
2208 struct srp_target_port *target = ch->target;
2209 struct Scsi_Host *shost = target->scsi_host;
2210 struct ib_class_port_info *cpi;
2211 int opcode;
2212
2213 switch (event->param.rej_rcvd.reason) {
2214 case IB_CM_REJ_PORT_CM_REDIRECT:
2215 cpi = event->param.rej_rcvd.ari;
2216 ch->path.dlid = cpi->redirect_lid;
2217 ch->path.pkey = cpi->redirect_pkey;
2218 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2219 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2220
2221 ch->status = ch->path.dlid ?
2222 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2223 break;
2224
2225 case IB_CM_REJ_PORT_REDIRECT:
2226 if (srp_target_is_topspin(target)) {
2227 /*
2228 * Topspin/Cisco SRP gateways incorrectly send
2229 * reject reason code 25 when they mean 24
2230 * (port redirect).
2231 */
2232 memcpy(ch->path.dgid.raw,
2233 event->param.rej_rcvd.ari, 16);
2234
2235 shost_printk(KERN_DEBUG, shost,
2236 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2237 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2238 be64_to_cpu(ch->path.dgid.global.interface_id));
2239
2240 ch->status = SRP_PORT_REDIRECT;
2241 } else {
2242 shost_printk(KERN_WARNING, shost,
2243 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2244 ch->status = -ECONNRESET;
2245 }
2246 break;
2247
2248 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2249 shost_printk(KERN_WARNING, shost,
2250 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2251 ch->status = -ECONNRESET;
2252 break;
2253
2254 case IB_CM_REJ_CONSUMER_DEFINED:
2255 opcode = *(u8 *) event->private_data;
2256 if (opcode == SRP_LOGIN_REJ) {
2257 struct srp_login_rej *rej = event->private_data;
2258 u32 reason = be32_to_cpu(rej->reason);
2259
2260 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2261 shost_printk(KERN_WARNING, shost,
2262 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2263 else
2264 shost_printk(KERN_WARNING, shost, PFX
2265 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2266 target->sgid.raw,
2267 target->orig_dgid.raw, reason);
2268 } else
2269 shost_printk(KERN_WARNING, shost,
2270 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2271 " opcode 0x%02x\n", opcode);
2272 ch->status = -ECONNRESET;
2273 break;
2274
2275 case IB_CM_REJ_STALE_CONN:
2276 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2277 ch->status = SRP_STALE_CONN;
2278 break;
2279
2280 default:
2281 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2282 event->param.rej_rcvd.reason);
2283 ch->status = -ECONNRESET;
2284 }
2285 }
2286
2287 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2288 {
2289 struct srp_rdma_ch *ch = cm_id->context;
2290 struct srp_target_port *target = ch->target;
2291 int comp = 0;
2292
2293 switch (event->event) {
2294 case IB_CM_REQ_ERROR:
2295 shost_printk(KERN_DEBUG, target->scsi_host,
2296 PFX "Sending CM REQ failed\n");
2297 comp = 1;
2298 ch->status = -ECONNRESET;
2299 break;
2300
2301 case IB_CM_REP_RECEIVED:
2302 comp = 1;
2303 srp_cm_rep_handler(cm_id, event->private_data, ch);
2304 break;
2305
2306 case IB_CM_REJ_RECEIVED:
2307 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2308 comp = 1;
2309
2310 srp_cm_rej_handler(cm_id, event, ch);
2311 break;
2312
2313 case IB_CM_DREQ_RECEIVED:
2314 shost_printk(KERN_WARNING, target->scsi_host,
2315 PFX "DREQ received - connection closed\n");
2316 ch->connected = false;
2317 if (ib_send_cm_drep(cm_id, NULL, 0))
2318 shost_printk(KERN_ERR, target->scsi_host,
2319 PFX "Sending CM DREP failed\n");
2320 queue_work(system_long_wq, &target->tl_err_work);
2321 break;
2322
2323 case IB_CM_TIMEWAIT_EXIT:
2324 shost_printk(KERN_ERR, target->scsi_host,
2325 PFX "connection closed\n");
2326 comp = 1;
2327
2328 ch->status = 0;
2329 break;
2330
2331 case IB_CM_MRA_RECEIVED:
2332 case IB_CM_DREQ_ERROR:
2333 case IB_CM_DREP_RECEIVED:
2334 break;
2335
2336 default:
2337 shost_printk(KERN_WARNING, target->scsi_host,
2338 PFX "Unhandled CM event %d\n", event->event);
2339 break;
2340 }
2341
2342 if (comp)
2343 complete(&ch->done);
2344
2345 return 0;
2346 }
2347
2348 /**
2349 * srp_change_queue_depth() - set the device queue depth
2350 * @sdev: scsi device struct
2351 * @qdepth: requested queue depth
2352 *
2353 * Returns queue depth.
2354 */
2355 static int
2356 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2357 {
2358 if (!sdev->tagged_supported)
2359 qdepth = 1;
2360 return scsi_change_queue_depth(sdev, qdepth);
2361 }
2362
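/*
 * Send an SRP task management request (func, e.g. SRP_TSK_ABORT_TASK or
 * SRP_TSK_LUN_RESET) for the given tag and LUN and wait up to
 * SRP_ABORT_TIMEOUT_MS for the response. Returns 0 on success and -1 if
 * sending failed or if no response was received in time.
 */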
2363 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2364 u8 func)
2365 {
2366 struct srp_target_port *target = ch->target;
2367 struct srp_rport *rport = target->rport;
2368 struct ib_device *dev = target->srp_host->srp_dev->dev;
2369 struct srp_iu *iu;
2370 struct srp_tsk_mgmt *tsk_mgmt;
2371
2372 if (!ch->connected || target->qp_in_error)
2373 return -1;
2374
2375 init_completion(&ch->tsk_mgmt_done);
2376
2377 /*
2378 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2379 * invoked while a task management function is being sent.
2380 */
2381 mutex_lock(&rport->mutex);
2382 spin_lock_irq(&ch->lock);
2383 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2384 spin_unlock_irq(&ch->lock);
2385
2386 if (!iu) {
2387 mutex_unlock(&rport->mutex);
2388
2389 return -1;
2390 }
2391
2392 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2393 DMA_TO_DEVICE);
2394 tsk_mgmt = iu->buf;
2395 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2396
2397 tsk_mgmt->opcode = SRP_TSK_MGMT;
2398 int_to_scsilun(lun, &tsk_mgmt->lun);
2399 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2400 tsk_mgmt->tsk_mgmt_func = func;
2401 tsk_mgmt->task_tag = req_tag;
2402
2403 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2404 DMA_TO_DEVICE);
2405 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2406 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2407 mutex_unlock(&rport->mutex);
2408
2409 return -1;
2410 }
2411 mutex_unlock(&rport->mutex);
2412
2413 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2414 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2415 return -1;
2416
2417 return 0;
2418 }
2419
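/*
 * SCSI EH abort handler. Returns SUCCESS if the command is no longer owned
 * by this driver or if the SRP ABORT TASK request succeeded, FAST_IO_FAIL
 * if the rport has been lost and FAILED otherwise.
 */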
2420 static int srp_abort(struct scsi_cmnd *scmnd)
2421 {
2422 struct srp_target_port *target = host_to_target(scmnd->device->host);
2423 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2424 u32 tag;
2425 u16 ch_idx;
2426 struct srp_rdma_ch *ch;
2427 int ret;
2428
2429 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2430
2431 if (!req)
2432 return SUCCESS;
2433 tag = blk_mq_unique_tag(scmnd->request);
2434 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2435 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2436 return SUCCESS;
2437 ch = &target->ch[ch_idx];
2438 if (!srp_claim_req(ch, req, NULL, scmnd))
2439 return SUCCESS;
2440 shost_printk(KERN_ERR, target->scsi_host,
2441 "Sending SRP abort for tag %#x\n", tag);
2442 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2443 SRP_TSK_ABORT_TASK) == 0)
2444 ret = SUCCESS;
2445 else if (target->rport->state == SRP_RPORT_LOST)
2446 ret = FAST_IO_FAIL;
2447 else
2448 ret = FAILED;
2449 srp_free_req(ch, req, scmnd, 0);
2450 scmnd->result = DID_ABORT << 16;
2451 scmnd->scsi_done(scmnd);
2452
2453 return ret;
2454 }
2455
2456 static int srp_reset_device(struct scsi_cmnd *scmnd)
2457 {
2458 struct srp_target_port *target = host_to_target(scmnd->device->host);
2459 struct srp_rdma_ch *ch;
2460 int i, j;
2461
2462 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2463
2464 ch = &target->ch[0];
2465 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2466 SRP_TSK_LUN_RESET))
2467 return FAILED;
2468 if (ch->tsk_mgmt_status)
2469 return FAILED;
2470
2471 for (i = 0; i < target->ch_count; i++) {
2472 ch = &target->ch[i];
2473 for (j = 0; j < target->req_ring_size; ++j) {
2474 struct srp_request *req = &ch->req_ring[j];
2475
2476 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2477 }
2478 }
2479
2480 return SUCCESS;
2481 }
2482
2483 static int srp_reset_host(struct scsi_cmnd *scmnd)
2484 {
2485 struct srp_target_port *target = host_to_target(scmnd->device->host);
2486
2487 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2488
2489 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2490 }
2491
2492 static int srp_slave_configure(struct scsi_device *sdev)
2493 {
2494 struct Scsi_Host *shost = sdev->host;
2495 struct srp_target_port *target = host_to_target(shost);
2496 struct request_queue *q = sdev->request_queue;
2497 unsigned long timeout;
2498
2499 if (sdev->type == TYPE_DISK) {
2500 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2501 blk_queue_rq_timeout(q, timeout);
2502 }
2503
2504 return 0;
2505 }
2506
2507 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2508 char *buf)
2509 {
2510 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2511
2512 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2513 }
2514
2515 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2516 char *buf)
2517 {
2518 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2519
2520 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2521 }
2522
2523 static ssize_t show_service_id(struct device *dev,
2524 struct device_attribute *attr, char *buf)
2525 {
2526 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2527
2528 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2529 }
2530
2531 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2532 char *buf)
2533 {
2534 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2535
2536 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2537 }
2538
2539 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2540 char *buf)
2541 {
2542 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2543
2544 return sprintf(buf, "%pI6\n", target->sgid.raw);
2545 }
2546
2547 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2548 char *buf)
2549 {
2550 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2551 struct srp_rdma_ch *ch = &target->ch[0];
2552
2553 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2554 }
2555
2556 static ssize_t show_orig_dgid(struct device *dev,
2557 struct device_attribute *attr, char *buf)
2558 {
2559 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2560
2561 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2562 }
2563
2564 static ssize_t show_req_lim(struct device *dev,
2565 struct device_attribute *attr, char *buf)
2566 {
2567 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2568 struct srp_rdma_ch *ch;
2569 int i, req_lim = INT_MAX;
2570
2571 for (i = 0; i < target->ch_count; i++) {
2572 ch = &target->ch[i];
2573 req_lim = min(req_lim, ch->req_lim);
2574 }
2575 return sprintf(buf, "%d\n", req_lim);
2576 }
2577
2578 static ssize_t show_zero_req_lim(struct device *dev,
2579 struct device_attribute *attr, char *buf)
2580 {
2581 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2582
2583 return sprintf(buf, "%d\n", target->zero_req_lim);
2584 }
2585
2586 static ssize_t show_local_ib_port(struct device *dev,
2587 struct device_attribute *attr, char *buf)
2588 {
2589 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2590
2591 return sprintf(buf, "%d\n", target->srp_host->port);
2592 }
2593
2594 static ssize_t show_local_ib_device(struct device *dev,
2595 struct device_attribute *attr, char *buf)
2596 {
2597 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2598
2599 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2600 }
2601
2602 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2603 char *buf)
2604 {
2605 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2606
2607 return sprintf(buf, "%d\n", target->ch_count);
2608 }
2609
2610 static ssize_t show_comp_vector(struct device *dev,
2611 struct device_attribute *attr, char *buf)
2612 {
2613 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2614
2615 return sprintf(buf, "%d\n", target->comp_vector);
2616 }
2617
2618 static ssize_t show_tl_retry_count(struct device *dev,
2619 struct device_attribute *attr, char *buf)
2620 {
2621 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2622
2623 return sprintf(buf, "%d\n", target->tl_retry_count);
2624 }
2625
2626 static ssize_t show_cmd_sg_entries(struct device *dev,
2627 struct device_attribute *attr, char *buf)
2628 {
2629 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2630
2631 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2632 }
2633
2634 static ssize_t show_allow_ext_sg(struct device *dev,
2635 struct device_attribute *attr, char *buf)
2636 {
2637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2638
2639 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2640 }
2641
2642 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2643 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2644 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2645 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2646 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2647 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2648 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2649 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2650 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2651 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2652 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2653 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2654 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2655 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2656 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2657 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2658
2659 static struct device_attribute *srp_host_attrs[] = {
2660 &dev_attr_id_ext,
2661 &dev_attr_ioc_guid,
2662 &dev_attr_service_id,
2663 &dev_attr_pkey,
2664 &dev_attr_sgid,
2665 &dev_attr_dgid,
2666 &dev_attr_orig_dgid,
2667 &dev_attr_req_lim,
2668 &dev_attr_zero_req_lim,
2669 &dev_attr_local_ib_port,
2670 &dev_attr_local_ib_device,
2671 &dev_attr_ch_count,
2672 &dev_attr_comp_vector,
2673 &dev_attr_tl_retry_count,
2674 &dev_attr_cmd_sg_entries,
2675 &dev_attr_allow_ext_sg,
2676 NULL
2677 };
2678
2679 static struct scsi_host_template srp_template = {
2680 .module = THIS_MODULE,
2681 .name = "InfiniBand SRP initiator",
2682 .proc_name = DRV_NAME,
2683 .slave_configure = srp_slave_configure,
2684 .info = srp_target_info,
2685 .queuecommand = srp_queuecommand,
2686 .change_queue_depth = srp_change_queue_depth,
2687 .eh_abort_handler = srp_abort,
2688 .eh_device_reset_handler = srp_reset_device,
2689 .eh_host_reset_handler = srp_reset_host,
2690 .skip_settle_delay = true,
2691 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2692 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2693 .this_id = -1,
2694 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2695 .use_clustering = ENABLE_CLUSTERING,
2696 .shost_attrs = srp_host_attrs,
2697 .use_blk_tags = 1,
2698 .track_queue_depth = 1,
2699 };
2700
2701 static int srp_sdev_count(struct Scsi_Host *host)
2702 {
2703 struct scsi_device *sdev;
2704 int c = 0;
2705
2706 shost_for_each_device(sdev, host)
2707 c++;
2708
2709 return c;
2710 }
2711
2712 /*
2713 * Return values:
2714 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2715 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2716 * removal has been scheduled.
2717 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2718 */
2719 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2720 {
2721 struct srp_rport_identifiers ids;
2722 struct srp_rport *rport;
2723
2724 target->state = SRP_TARGET_SCANNING;
2725 sprintf(target->target_name, "SRP.T10:%016llX",
2726 be64_to_cpu(target->id_ext));
2727
2728 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2729 return -ENODEV;
2730
2731 memcpy(ids.port_id, &target->id_ext, 8);
2732 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2733 ids.roles = SRP_RPORT_ROLE_TARGET;
2734 rport = srp_rport_add(target->scsi_host, &ids);
2735 if (IS_ERR(rport)) {
2736 scsi_remove_host(target->scsi_host);
2737 return PTR_ERR(rport);
2738 }
2739
2740 rport->lld_data = target;
2741 target->rport = rport;
2742
2743 spin_lock(&host->target_lock);
2744 list_add_tail(&target->list, &host->target_list);
2745 spin_unlock(&host->target_lock);
2746
2747 scsi_scan_target(&target->scsi_host->shost_gendev,
2748 0, target->scsi_id, SCAN_WILD_CARD, 0);
2749
2750 if (srp_connected_ch(target) < target->ch_count ||
2751 target->qp_in_error) {
2752 shost_printk(KERN_INFO, target->scsi_host,
2753 PFX "SCSI scan failed - removing SCSI host\n");
2754 srp_queue_remove_work(target);
2755 goto out;
2756 }
2757
2758 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2759 dev_name(&target->scsi_host->shost_gendev),
2760 srp_sdev_count(target->scsi_host));
2761
2762 spin_lock_irq(&target->lock);
2763 if (target->state == SRP_TARGET_SCANNING)
2764 target->state = SRP_TARGET_LIVE;
2765 spin_unlock_irq(&target->lock);
2766
2767 out:
2768 return 0;
2769 }
2770
2771 static void srp_release_dev(struct device *dev)
2772 {
2773 struct srp_host *host =
2774 container_of(dev, struct srp_host, dev);
2775
2776 complete(&host->released);
2777 }
2778
2779 static struct class srp_class = {
2780 .name = "infiniband_srp",
2781 .dev_release = srp_release_dev
2782 };
2783
2784 /**
2785 * srp_conn_unique() - check whether the connection to a target is unique
2786 * @host: SRP host.
2787 * @target: SRP target port.
2788 */
2789 static bool srp_conn_unique(struct srp_host *host,
2790 struct srp_target_port *target)
2791 {
2792 struct srp_target_port *t;
2793 bool ret = false;
2794
2795 if (target->state == SRP_TARGET_REMOVED)
2796 goto out;
2797
2798 ret = true;
2799
2800 spin_lock(&host->target_lock);
2801 list_for_each_entry(t, &host->target_list, list) {
2802 if (t != target &&
2803 target->id_ext == t->id_ext &&
2804 target->ioc_guid == t->ioc_guid &&
2805 target->initiator_ext == t->initiator_ext) {
2806 ret = false;
2807 break;
2808 }
2809 }
2810 spin_unlock(&host->target_lock);
2811
2812 out:
2813 return ret;
2814 }
2815
2816 /*
2817 * Target ports are added by writing
2818 *
2819 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2820 * pkey=<P_Key>,service_id=<service ID>
2821 *
2822 * to the add_target sysfs attribute.
2823 */
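/*
 * Example, using illustrative values (the sysfs directory name depends on
 * the local HCA name and port number):
 *
 *   echo id_ext=200100e08b000001,ioc_guid=00066a0123456789,dgid=fe800000000000000002c90300000001,pkey=ffff,service_id=0000494353535250 > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */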
2824 enum {
2825 SRP_OPT_ERR = 0,
2826 SRP_OPT_ID_EXT = 1 << 0,
2827 SRP_OPT_IOC_GUID = 1 << 1,
2828 SRP_OPT_DGID = 1 << 2,
2829 SRP_OPT_PKEY = 1 << 3,
2830 SRP_OPT_SERVICE_ID = 1 << 4,
2831 SRP_OPT_MAX_SECT = 1 << 5,
2832 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2833 SRP_OPT_IO_CLASS = 1 << 7,
2834 SRP_OPT_INITIATOR_EXT = 1 << 8,
2835 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2836 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2837 SRP_OPT_SG_TABLESIZE = 1 << 11,
2838 SRP_OPT_COMP_VECTOR = 1 << 12,
2839 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2840 SRP_OPT_QUEUE_SIZE = 1 << 14,
2841 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2842 SRP_OPT_IOC_GUID |
2843 SRP_OPT_DGID |
2844 SRP_OPT_PKEY |
2845 SRP_OPT_SERVICE_ID),
2846 };
2847
2848 static const match_table_t srp_opt_tokens = {
2849 { SRP_OPT_ID_EXT, "id_ext=%s" },
2850 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2851 { SRP_OPT_DGID, "dgid=%s" },
2852 { SRP_OPT_PKEY, "pkey=%x" },
2853 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2854 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2855 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2856 { SRP_OPT_IO_CLASS, "io_class=%x" },
2857 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2858 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2859 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2860 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2861 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2862 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2863 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2864 { SRP_OPT_ERR, NULL }
2865 };
2866
2867 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2868 {
2869 char *options, *sep_opt;
2870 char *p;
2871 char dgid[3];
2872 substring_t args[MAX_OPT_ARGS];
2873 int opt_mask = 0;
2874 int token;
2875 int ret = -EINVAL;
2876 int i;
2877
2878 options = kstrdup(buf, GFP_KERNEL);
2879 if (!options)
2880 return -ENOMEM;
2881
2882 sep_opt = options;
2883 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2884 if (!*p)
2885 continue;
2886
2887 token = match_token(p, srp_opt_tokens, args);
2888 opt_mask |= token;
2889
2890 switch (token) {
2891 case SRP_OPT_ID_EXT:
2892 p = match_strdup(args);
2893 if (!p) {
2894 ret = -ENOMEM;
2895 goto out;
2896 }
2897 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2898 kfree(p);
2899 break;
2900
2901 case SRP_OPT_IOC_GUID:
2902 p = match_strdup(args);
2903 if (!p) {
2904 ret = -ENOMEM;
2905 goto out;
2906 }
2907 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2908 kfree(p);
2909 break;
2910
2911 case SRP_OPT_DGID:
2912 p = match_strdup(args);
2913 if (!p) {
2914 ret = -ENOMEM;
2915 goto out;
2916 }
2917 if (strlen(p) != 32) {
2918 pr_warn("bad dest GID parameter '%s'\n", p);
2919 kfree(p);
2920 goto out;
2921 }
2922
2923 for (i = 0; i < 16; ++i) {
2924 strlcpy(dgid, p + i * 2, sizeof(dgid));
2925 if (sscanf(dgid, "%hhx",
2926 &target->orig_dgid.raw[i]) < 1) {
2927 ret = -EINVAL;
2928 kfree(p);
2929 goto out;
2930 }
2931 }
2932 kfree(p);
2933 break;
2934
2935 case SRP_OPT_PKEY:
2936 if (match_hex(args, &token)) {
2937 pr_warn("bad P_Key parameter '%s'\n", p);
2938 goto out;
2939 }
2940 target->pkey = cpu_to_be16(token);
2941 break;
2942
2943 case SRP_OPT_SERVICE_ID:
2944 p = match_strdup(args);
2945 if (!p) {
2946 ret = -ENOMEM;
2947 goto out;
2948 }
2949 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2950 kfree(p);
2951 break;
2952
2953 case SRP_OPT_MAX_SECT:
2954 if (match_int(args, &token)) {
2955 pr_warn("bad max sect parameter '%s'\n", p);
2956 goto out;
2957 }
2958 target->scsi_host->max_sectors = token;
2959 break;
2960
2961 case SRP_OPT_QUEUE_SIZE:
2962 if (match_int(args, &token) || token < 1) {
2963 pr_warn("bad queue_size parameter '%s'\n", p);
2964 goto out;
2965 }
2966 target->scsi_host->can_queue = token;
2967 target->queue_size = token + SRP_RSP_SQ_SIZE +
2968 SRP_TSK_MGMT_SQ_SIZE;
2969 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2970 target->scsi_host->cmd_per_lun = token;
2971 break;
2972
2973 case SRP_OPT_MAX_CMD_PER_LUN:
2974 if (match_int(args, &token) || token < 1) {
2975 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2976 p);
2977 goto out;
2978 }
2979 target->scsi_host->cmd_per_lun = token;
2980 break;
2981
2982 case SRP_OPT_IO_CLASS:
2983 if (match_hex(args, &token)) {
2984 pr_warn("bad IO class parameter '%s'\n", p);
2985 goto out;
2986 }
2987 if (token != SRP_REV10_IB_IO_CLASS &&
2988 token != SRP_REV16A_IB_IO_CLASS) {
2989 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2990 token, SRP_REV10_IB_IO_CLASS,
2991 SRP_REV16A_IB_IO_CLASS);
2992 goto out;
2993 }
2994 target->io_class = token;
2995 break;
2996
2997 case SRP_OPT_INITIATOR_EXT:
2998 p = match_strdup(args);
2999 if (!p) {
3000 ret = -ENOMEM;
3001 goto out;
3002 }
3003 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3004 kfree(p);
3005 break;
3006
3007 case SRP_OPT_CMD_SG_ENTRIES:
3008 if (match_int(args, &token) || token < 1 || token > 255) {
3009 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3010 p);
3011 goto out;
3012 }
3013 target->cmd_sg_cnt = token;
3014 break;
3015
3016 case SRP_OPT_ALLOW_EXT_SG:
3017 if (match_int(args, &token)) {
3018 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3019 goto out;
3020 }
3021 target->allow_ext_sg = !!token;
3022 break;
3023
3024 case SRP_OPT_SG_TABLESIZE:
3025 if (match_int(args, &token) || token < 1 ||
3026 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3027 pr_warn("bad max sg_tablesize parameter '%s'\n",
3028 p);
3029 goto out;
3030 }
3031 target->sg_tablesize = token;
3032 break;
3033
3034 case SRP_OPT_COMP_VECTOR:
3035 if (match_int(args, &token) || token < 0) {
3036 pr_warn("bad comp_vector parameter '%s'\n", p);
3037 goto out;
3038 }
3039 target->comp_vector = token;
3040 break;
3041
3042 case SRP_OPT_TL_RETRY_COUNT:
3043 if (match_int(args, &token) || token < 2 || token > 7) {
3044 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3045 p);
3046 goto out;
3047 }
3048 target->tl_retry_count = token;
3049 break;
3050
3051 default:
3052 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3053 p);
3054 goto out;
3055 }
3056 }
3057
3058 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3059 ret = 0;
3060 else
3061 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3062 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3063 !(srp_opt_tokens[i].token & opt_mask))
3064 pr_warn("target creation request is missing parameter '%s'\n",
3065 srp_opt_tokens[i].pattern);
3066
3067 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3068 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3069 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3070 target->scsi_host->cmd_per_lun,
3071 target->scsi_host->can_queue);
3072
3073 out:
3074 kfree(options);
3075 return ret;
3076 }
3077
3078 static ssize_t srp_create_target(struct device *dev,
3079 struct device_attribute *attr,
3080 const char *buf, size_t count)
3081 {
3082 struct srp_host *host =
3083 container_of(dev, struct srp_host, dev);
3084 struct Scsi_Host *target_host;
3085 struct srp_target_port *target;
3086 struct srp_rdma_ch *ch;
3087 struct srp_device *srp_dev = host->srp_dev;
3088 struct ib_device *ibdev = srp_dev->dev;
3089 int ret, node_idx, node, cpu, i;
3090 bool multich = false;
3091
3092 target_host = scsi_host_alloc(&srp_template,
3093 sizeof (struct srp_target_port));
3094 if (!target_host)
3095 return -ENOMEM;
3096
3097 target_host->transportt = ib_srp_transport_template;
3098 target_host->max_channel = 0;
3099 target_host->max_id = 1;
3100 target_host->max_lun = -1LL;
3101 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3102
3103 target = host_to_target(target_host);
3104
3105 target->io_class = SRP_REV16A_IB_IO_CLASS;
3106 target->scsi_host = target_host;
3107 target->srp_host = host;
3108 target->lkey = host->srp_dev->pd->local_dma_lkey;
3109 target->rkey = host->srp_dev->mr->rkey;
3110 target->cmd_sg_cnt = cmd_sg_entries;
3111 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3112 target->allow_ext_sg = allow_ext_sg;
3113 target->tl_retry_count = 7;
3114 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3115
3116 /*
3117 * Prevent the SCSI host from being removed by srp_remove_target()
3118 * before this function returns.
3119 */
3120 scsi_host_get(target->scsi_host);
3121
3122 mutex_lock(&host->add_target_mutex);
3123
3124 ret = srp_parse_options(buf, target);
3125 if (ret)
3126 goto out;
3127
3128 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3129 if (ret)
3130 goto out;
3131
3132 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3133
3134 if (!srp_conn_unique(target->srp_host, target)) {
3135 shost_printk(KERN_INFO, target->scsi_host,
3136 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3137 be64_to_cpu(target->id_ext),
3138 be64_to_cpu(target->ioc_guid),
3139 be64_to_cpu(target->initiator_ext));
3140 ret = -EEXIST;
3141 goto out;
3142 }
3143
3144 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3145 target->cmd_sg_cnt < target->sg_tablesize) {
3146 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3147 target->sg_tablesize = target->cmd_sg_cnt;
3148 }
3149
3150 target_host->sg_tablesize = target->sg_tablesize;
3151 target->indirect_size = target->sg_tablesize *
3152 sizeof (struct srp_direct_buf);
3153 target->max_iu_len = sizeof (struct srp_cmd) +
3154 sizeof (struct srp_indirect_buf) +
3155 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3156
3157 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3158 INIT_WORK(&target->remove_work, srp_remove_work);
3159 spin_lock_init(&target->lock);
3160 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3161 if (ret)
3162 goto out;
3163
3164 ret = -ENOMEM;
3165 target->ch_count = max_t(unsigned, num_online_nodes(),
3166 min(ch_count ? :
3167 min(4 * num_online_nodes(),
3168 ibdev->num_comp_vectors),
3169 num_online_cpus()));
3170 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3171 GFP_KERNEL);
3172 if (!target->ch)
3173 goto out;
3174
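/*
 * Distribute the RDMA channels evenly over the online NUMA nodes, with at
 * most one channel per online CPU, and spread the channels of each node
 * over the completion vectors assigned to that node.
 */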
3175 node_idx = 0;
3176 for_each_online_node(node) {
3177 const int ch_start = (node_idx * target->ch_count /
3178 num_online_nodes());
3179 const int ch_end = ((node_idx + 1) * target->ch_count /
3180 num_online_nodes());
3181 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3182 num_online_nodes() + target->comp_vector)
3183 % ibdev->num_comp_vectors;
3184 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3185 num_online_nodes() + target->comp_vector)
3186 % ibdev->num_comp_vectors;
3187 int cpu_idx = 0;
3188
3189 for_each_online_cpu(cpu) {
3190 if (cpu_to_node(cpu) != node)
3191 continue;
3192 if (ch_start + cpu_idx >= ch_end)
3193 continue;
3194 ch = &target->ch[ch_start + cpu_idx];
3195 ch->target = target;
3196 ch->comp_vector = cv_start == cv_end ? cv_start :
3197 cv_start + cpu_idx % (cv_end - cv_start);
3198 spin_lock_init(&ch->lock);
3199 INIT_LIST_HEAD(&ch->free_tx);
3200 ret = srp_new_cm_id(ch);
3201 if (ret)
3202 goto err_disconnect;
3203
3204 ret = srp_create_ch_ib(ch);
3205 if (ret)
3206 goto err_disconnect;
3207
3208 ret = srp_alloc_req_data(ch);
3209 if (ret)
3210 goto err_disconnect;
3211
3212 ret = srp_connect_ch(ch, multich);
3213 if (ret) {
3214 shost_printk(KERN_ERR, target->scsi_host,
3215 PFX "Connection %d/%d failed\n",
3216 ch_start + cpu_idx,
3217 target->ch_count);
3218 if (node_idx == 0 && cpu_idx == 0) {
3219 goto err_disconnect;
3220 } else {
3221 srp_free_ch_ib(target, ch);
3222 srp_free_req_data(target, ch);
3223 target->ch_count = ch - target->ch;
3224 goto connected;
3225 }
3226 }
3227
3228 multich = true;
3229 cpu_idx++;
3230 }
3231 node_idx++;
3232 }
3233
3234 connected:
3235 target->scsi_host->nr_hw_queues = target->ch_count;
3236
3237 ret = srp_add_target(host, target);
3238 if (ret)
3239 goto err_disconnect;
3240
3241 if (target->state != SRP_TARGET_REMOVED) {
3242 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3243 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3244 be64_to_cpu(target->id_ext),
3245 be64_to_cpu(target->ioc_guid),
3246 be16_to_cpu(target->pkey),
3247 be64_to_cpu(target->service_id),
3248 target->sgid.raw, target->orig_dgid.raw);
3249 }
3250
3251 ret = count;
3252
3253 out:
3254 mutex_unlock(&host->add_target_mutex);
3255
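/*
 * The first put balances the scsi_host_get() call above. Upon failure the
 * second put also drops the scsi_host_alloc() reference such that the SCSI
 * host gets freed.
 */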
3256 scsi_host_put(target->scsi_host);
3257 if (ret < 0)
3258 scsi_host_put(target->scsi_host);
3259
3260 return ret;
3261
3262 err_disconnect:
3263 srp_disconnect_target(target);
3264
3265 for (i = 0; i < target->ch_count; i++) {
3266 ch = &target->ch[i];
3267 srp_free_ch_ib(target, ch);
3268 srp_free_req_data(target, ch);
3269 }
3270
3271 kfree(target->ch);
3272 goto out;
3273 }
3274
3275 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3276
3277 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3278 char *buf)
3279 {
3280 struct srp_host *host = container_of(dev, struct srp_host, dev);
3281
3282 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3283 }
3284
3285 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3286
3287 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3288 char *buf)
3289 {
3290 struct srp_host *host = container_of(dev, struct srp_host, dev);
3291
3292 return sprintf(buf, "%d\n", host->port);
3293 }
3294
3295 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3296
3297 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3298 {
3299 struct srp_host *host;
3300
3301 host = kzalloc(sizeof *host, GFP_KERNEL);
3302 if (!host)
3303 return NULL;
3304
3305 INIT_LIST_HEAD(&host->target_list);
3306 spin_lock_init(&host->target_lock);
3307 init_completion(&host->released);
3308 mutex_init(&host->add_target_mutex);
3309 host->srp_dev = device;
3310 host->port = port;
3311
3312 host->dev.class = &srp_class;
3313 host->dev.parent = device->dev->dma_device;
3314 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3315
3316 if (device_register(&host->dev))
3317 goto free_host;
3318 if (device_create_file(&host->dev, &dev_attr_add_target))
3319 goto err_class;
3320 if (device_create_file(&host->dev, &dev_attr_ibdev))
3321 goto err_class;
3322 if (device_create_file(&host->dev, &dev_attr_port))
3323 goto err_class;
3324
3325 return host;
3326
3327 err_class:
3328 device_unregister(&host->dev);
3329
3330 free_host:
3331 kfree(host);
3332
3333 return NULL;
3334 }
3335
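/*
 * Per-HCA client callback: query the device capabilities, choose between
 * FMR and fast registration, derive the memory registration page size and
 * limits, allocate a PD and a global DMA MR and register one srp_host per
 * port.
 */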
3336 static void srp_add_one(struct ib_device *device)
3337 {
3338 struct srp_device *srp_dev;
3339 struct ib_device_attr *dev_attr;
3340 struct srp_host *host;
3341 int mr_page_shift, p;
3342 u64 max_pages_per_mr;
3343
3344 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3345 if (!dev_attr)
3346 return;
3347
3348 if (ib_query_device(device, dev_attr)) {
3349 pr_warn("Query device failed for %s\n", device->name);
3350 goto free_attr;
3351 }
3352
3353 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3354 if (!srp_dev)
3355 goto free_attr;
3356
3357 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3358 device->map_phys_fmr && device->unmap_fmr);
3359 srp_dev->has_fr = (dev_attr->device_cap_flags &
3360 IB_DEVICE_MEM_MGT_EXTENSIONS);
3361 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3362 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3363
3364 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3365 (!srp_dev->has_fmr || prefer_fr));
3366 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3367
3368 /*
3369 * Use the smallest page size supported by the HCA, down to a
3370 * minimum of 4096 bytes. We're unlikely to build large sglists
3371 * out of smaller entries.
3372 */
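/* For example, a page_size_cap of 0xfffff000 yields a 4096 byte mr_page_size. */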
3373 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3374 srp_dev->mr_page_size = 1 << mr_page_shift;
3375 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3376 max_pages_per_mr = dev_attr->max_mr_size;
3377 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3378 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3379 max_pages_per_mr);
3380 if (srp_dev->use_fast_reg) {
3381 srp_dev->max_pages_per_mr =
3382 min_t(u32, srp_dev->max_pages_per_mr,
3383 dev_attr->max_fast_reg_page_list_len);
3384 }
3385 srp_dev->mr_max_size = srp_dev->mr_page_size *
3386 srp_dev->max_pages_per_mr;
3387 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3388 device->name, mr_page_shift, dev_attr->max_mr_size,
3389 dev_attr->max_fast_reg_page_list_len,
3390 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3391
3392 INIT_LIST_HEAD(&srp_dev->dev_list);
3393
3394 srp_dev->dev = device;
3395 srp_dev->pd = ib_alloc_pd(device);
3396 if (IS_ERR(srp_dev->pd))
3397 goto free_dev;
3398
3399 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3400 IB_ACCESS_LOCAL_WRITE |
3401 IB_ACCESS_REMOTE_READ |
3402 IB_ACCESS_REMOTE_WRITE);
3403 if (IS_ERR(srp_dev->mr))
3404 goto err_pd;
3405
3406 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3407 host = srp_add_port(srp_dev, p);
3408 if (host)
3409 list_add_tail(&host->list, &srp_dev->dev_list);
3410 }
3411
3412 ib_set_client_data(device, &srp_client, srp_dev);
3413
3414 goto free_attr;
3415
3416 err_pd:
3417 ib_dealloc_pd(srp_dev->pd);
3418
3419 free_dev:
3420 kfree(srp_dev);
3421
3422 free_attr:
3423 kfree(dev_attr);
3424 }
3425
3426 static void srp_remove_one(struct ib_device *device, void *client_data)
3427 {
3428 struct srp_device *srp_dev;
3429 struct srp_host *host, *tmp_host;
3430 struct srp_target_port *target;
3431
3432 srp_dev = client_data;
3433 if (!srp_dev)
3434 return;
3435
3436 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3437 device_unregister(&host->dev);
3438 /*
3439 * Wait for the sysfs entry to go away, so that no new
3440 * target ports can be created.
3441 */
3442 wait_for_completion(&host->released);
3443
3444 /*
3445 * Remove all target ports.
3446 */
3447 spin_lock(&host->target_lock);
3448 list_for_each_entry(target, &host->target_list, list)
3449 srp_queue_remove_work(target);
3450 spin_unlock(&host->target_lock);
3451
3452 /*
3453 * Wait for tl_err and target port removal tasks.
3454 */
3455 flush_workqueue(system_long_wq);
3456 flush_workqueue(srp_remove_wq);
3457
3458 kfree(host);
3459 }
3460
3461 ib_dereg_mr(srp_dev->mr);
3462 ib_dealloc_pd(srp_dev->pd);
3463
3464 kfree(srp_dev);
3465 }
3466
3467 static struct srp_function_template ib_srp_transport_functions = {
3468 .has_rport_state = true,
3469 .reset_timer_if_blocked = true,
3470 .reconnect_delay = &srp_reconnect_delay,
3471 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3472 .dev_loss_tmo = &srp_dev_loss_tmo,
3473 .reconnect = srp_rport_reconnect,
3474 .rport_delete = srp_rport_delete,
3475 .terminate_rport_io = srp_terminate_io,
3476 };
3477
3478 static int __init srp_init_module(void)
3479 {
3480 int ret;
3481
3482 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3483
3484 if (srp_sg_tablesize) {
3485 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3486 if (!cmd_sg_entries)
3487 cmd_sg_entries = srp_sg_tablesize;
3488 }
3489
3490 if (!cmd_sg_entries)
3491 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3492
3493 if (cmd_sg_entries > 255) {
3494 pr_warn("Clamping cmd_sg_entries to 255\n");
3495 cmd_sg_entries = 255;
3496 }
3497
3498 if (!indirect_sg_entries)
3499 indirect_sg_entries = cmd_sg_entries;
3500 else if (indirect_sg_entries < cmd_sg_entries) {
3501 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3502 cmd_sg_entries);
3503 indirect_sg_entries = cmd_sg_entries;
3504 }
3505
3506 srp_remove_wq = create_workqueue("srp_remove");
3507 if (!srp_remove_wq) {
3508 ret = -ENOMEM;
3509 goto out;
3510 }
3511
3512 ret = -ENOMEM;
3513 ib_srp_transport_template =
3514 srp_attach_transport(&ib_srp_transport_functions);
3515 if (!ib_srp_transport_template)
3516 goto destroy_wq;
3517
3518 ret = class_register(&srp_class);
3519 if (ret) {
3520 pr_err("couldn't register class infiniband_srp\n");
3521 goto release_tr;
3522 }
3523
3524 ib_sa_register_client(&srp_sa_client);
3525
3526 ret = ib_register_client(&srp_client);
3527 if (ret) {
3528 pr_err("couldn't register IB client\n");
3529 goto unreg_sa;
3530 }
3531
3532 out:
3533 return ret;
3534
3535 unreg_sa:
3536 ib_sa_unregister_client(&srp_sa_client);
3537 class_unregister(&srp_class);
3538
3539 release_tr:
3540 srp_release_transport(ib_srp_transport_template);
3541
3542 destroy_wq:
3543 destroy_workqueue(srp_remove_wq);
3544 goto out;
3545 }
3546
3547 static void __exit srp_cleanup_module(void)
3548 {
3549 ib_unregister_client(&srp_client);
3550 ib_sa_unregister_client(&srp_sa_client);
3551 class_unregister(&srp_class);
3552 srp_release_transport(ib_srp_transport_template);
3553 destroy_workqueue(srp_remove_wq);
3554 }
3555
3556 module_init(srp_init_module);
3557 module_exit(srp_cleanup_module);