/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
	atomic_t next_port_id;
};
#define to_srp_host_attrs(host)	((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
	struct scsi_transport_template t;
	struct srp_function_template *f;

	struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

	struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
	struct transport_container rport_attr_cont;
};

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d)	container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
	return dev_to_shost(r->dev.parent);
}

/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths timely.
 * Hence do not allow all three parameters to be disabled simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
{
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	if (reconnect_delay == 0)
		return -EINVAL;
	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (fast_io_fail_tmo < 0 &&
	    dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (dev_loss_tmo >= LONG_MAX / HZ)
		return -EINVAL;
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
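
/*
 * Usage sketch (not taken from any in-tree driver): an LLD that lets users
 * configure these timeouts would validate the combination before applying
 * it. The numeric values below are hypothetical:
 *
 *	int reconnect_delay = 10, fast_io_fail_tmo = 15, dev_loss_tmo = 600;
 *
 *	if (srp_tmo_valid(reconnect_delay, fast_io_fail_tmo, dev_loss_tmo))
 *		return -EINVAL;
 *
 * The call above would fail, for example, if fast_io_fail_tmo were raised
 * beyond SCSI_DEVICE_BLOCK_MAX_TIMEOUT or to a value >= dev_loss_tmo.
 */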

static int srp_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

	atomic_set(&srp_host->next_port_id, 0);
	return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
			       NULL, NULL);

static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
			       NULL, NULL, NULL);

#define SRP_PID(p) \
	(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
	(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
	(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
	(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]

#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
	"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
	u32 value;
	char *name;
} srp_rport_role_names[] = {
	{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
	{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
		if (srp_rport_role_names[i].value == rport->roles) {
			name = srp_rport_role_names[i].name;
			break;
		}
	return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	if (i->f->rport_delete) {
		i->f->rport_delete(rport);
		return count;
	} else {
		return -ENOSYS;
	}
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	static const char *const state_name[] = {
		[SRP_RPORT_RUNNING] = "running",
		[SRP_RPORT_BLOCKED] = "blocked",
		[SRP_RPORT_FAIL_FAST] = "fail-fast",
		[SRP_RPORT_LOST] = "lost",
	};
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	enum srp_rport_state state = rport->state;

	return sprintf(buf, "%s\n",
		       (unsigned)state < ARRAY_SIZE(state_name) ?
		       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
	return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

static int srp_parse_tmo(int *tmo, const char *buf)
{
	int res = 0;

	if (strncmp(buf, "off", 3) != 0)
		res = kstrtoint(buf, 0, tmo);
	else
		*tmo = -1;

	return res;
}
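
/*
 * The show/store handlers below share this convention: each timeout
 * attribute accepts either a number of seconds or the string "off" (stored
 * internally as -1, i.e. disabled). From user space, for example (the port
 * name is an arbitrary instance):
 *
 *	echo 60  > /sys/class/srp_remote_ports/port-1:1/dev_loss_tmo
 *	echo off > /sys/class/srp_remote_ports/port-1:1/fast_io_fail_tmo
 */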

static ssize_t show_reconnect_delay(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, const size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res, delay;

	res = srp_parse_tmo(&delay, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;

	if (rport->reconnect_delay <= 0 && delay > 0 &&
	    rport->state != SRP_RPORT_RUNNING) {
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   delay * HZ);
	} else if (delay <= 0) {
		cancel_delayed_work(&rport->reconnect_work);
	}
	rport->reconnect_delay = delay;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
		   store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int fast_io_fail_tmo;

	res = srp_parse_tmo(&fast_io_fail_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;
	rport->fast_io_fail_tmo = fast_io_fail_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_fast_io_fail_tmo,
		   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int dev_loss_tmo;

	res = srp_parse_tmo(&dev_loss_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
			    dev_loss_tmo);
	if (res)
		goto out;
	rport->dev_loss_tmo = dev_loss_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_dev_loss_tmo,
		   store_srp_rport_dev_loss_tmo);

static int srp_rport_set_state(struct srp_rport *rport,
			       enum srp_rport_state new_state)
{
	enum srp_rport_state old_state = rport->state;

	lockdep_assert_held(&rport->mutex);

	switch (new_state) {
	case SRP_RPORT_RUNNING:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_BLOCKED:
		switch (old_state) {
		case SRP_RPORT_RUNNING:
			break;
		default:
			goto invalid;
		}
		break;
	case SRP_RPORT_FAIL_FAST:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_LOST:
		break;
	}
	rport->state = new_state;
	return 0;

invalid:
	return -EINVAL;
}

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, reconnect_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, res;

	res = srp_reconnect_rport(rport);
	if (res != 0) {
		shost_printk(KERN_ERR, shost,
			     "reconnect attempt %d failed (%d)\n",
			     ++rport->failed_reconnects, res);
		delay = rport->reconnect_delay *
			min(100, max(1, rport->failed_reconnects - 10));
		if (delay > 0)
			queue_delayed_work(system_long_wq,
					   &rport->reconnect_work, delay * HZ);
	}
}
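
/*
 * Note on the retry interval above: the delay backs off once more than ten
 * consecutive attempts have failed, since delay = reconnect_delay *
 * min(100, max(1, failed_reconnects - 10)). With reconnect_delay == 10, the
 * first eleven failures are each retried after 10 s, the twelfth failure
 * waits 20 s, the thirteenth 30 s, and so on, up to a ceiling of
 * 100 * reconnect_delay (1000 s).
 */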

static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;

	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, fast_io_fail_work);
	struct Scsi_Host *shost = rport_to_shost(rport);

	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, dev_loss_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
	mutex_unlock(&rport->mutex);

	i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, fast_io_fail_tmo, dev_loss_tmo;

	lockdep_assert_held(&rport->mutex);

	delay = rport->reconnect_delay;
	fast_io_fail_tmo = rport->fast_io_fail_tmo;
	dev_loss_tmo = rport->dev_loss_tmo;
	pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
		 rport->state);

	if (rport->state == SRP_RPORT_LOST)
		return;
	if (delay > 0)
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   1UL * delay * HZ);
	if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
		pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
			 rport->state);
		scsi_target_block(&shost->shost_gendev);
		if (fast_io_fail_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->fast_io_fail_work,
					   1UL * fast_io_fail_tmo * HZ);
		if (dev_loss_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->dev_loss_work,
					   1UL * dev_loss_tmo * HZ);
	}
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	__srp_start_tl_fail_timers(rport);
	mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
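
/*
 * A minimal usage sketch; the lld_* names are hypothetical. An SRP initiator
 * driver would typically call this from the handler that notices a transport
 * failure, which blocks SCSI command queueing and arms the reconnect,
 * fast_io_fail and dev_loss timers in one go:
 *
 *	static void lld_handle_transport_error(struct lld_target *target)
 *	{
 *		srp_start_tl_fail_timers(target->rport);
 *	}
 */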

/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
 */
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	struct request_queue *q;
	int request_fn_active = 0;

	shost_for_each_device(sdev, shost) {
		q = sdev->request_queue;

		spin_lock_irq(q->queue_lock);
		request_fn_active += q->request_fn_active;
		spin_unlock_irq(q->queue_lock);
	}

	return request_fn_active;
}

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct scsi_device *sdev;
	int res;

	pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

	res = mutex_lock_interruptible(&rport->mutex);
	if (res)
		goto out;
	scsi_target_block(&shost->shost_gendev);
	while (scsi_request_fn_active(shost))
		msleep(20);
	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
		 dev_name(&shost->shost_gendev), rport->state, res);
	if (res == 0) {
		cancel_delayed_work(&rport->fast_io_fail_work);
		cancel_delayed_work(&rport->dev_loss_work);

		rport->failed_reconnects = 0;
		srp_rport_set_state(rport, SRP_RPORT_RUNNING);
		scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
		/*
		 * If the SCSI error handler has offlined one or more devices,
		 * invoking scsi_target_unblock() won't change the state of
		 * these devices into running so do that explicitly.
		 */
		spin_lock_irq(shost->host_lock);
		__shost_for_each_device(sdev, shost)
			if (sdev->sdev_state == SDEV_OFFLINE)
				sdev->sdev_state = SDEV_RUNNING;
		spin_unlock_irq(shost->host_lock);
	} else if (rport->state == SRP_RPORT_RUNNING) {
		/*
		 * srp_reconnect_rport() has been invoked with fast_io_fail
		 * and dev_loss off. Mark the port as failed and start the TL
		 * failure timers if these had not yet been started.
		 */
		__rport_fail_io_fast(rport);
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
		__srp_start_tl_fail_timers(rport);
	} else if (rport->state != SRP_RPORT_BLOCKED) {
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
	}
	mutex_unlock(&rport->mutex);

out:
	return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);
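
/*
 * Sketch of the second serialization strategy mentioned in the kernel-doc
 * above, for a hypothetical LLD (all lld_* names are made up): take the
 * rport mutex in every callback that may race with reconnect():
 *
 *	static int lld_queuecommand(struct Scsi_Host *shost,
 *				    struct scsi_cmnd *scmnd)
 *	{
 *		struct lld_target *target = shost_priv(shost);
 *		int ret;
 *
 *		mutex_lock(&target->rport->mutex);
 *		ret = lld_queue_cmd(target, scmnd);
 *		mutex_unlock(&target->rport->mutex);
 *		return ret;
 *	}
 */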

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
	return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}

static void srp_rport_release(struct device *dev)
{
	struct srp_rport *rport = dev_to_rport(dev);

	put_device(dev->parent);
	kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
	return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_srp_rport(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
	get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
	put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add a SRP remote port to the device hierarchy
 * @shost:	scsi host the remote port is connected to.
 * @ids:	The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
				struct srp_rport_identifiers *ids)
{
	struct srp_rport *rport;
	struct device *parent = &shost->shost_gendev;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	int id, ret;

	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rport->mutex);

	device_initialize(&rport->dev);

	rport->dev.parent = get_device(parent);
	rport->dev.release = srp_rport_release;

	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
	rport->roles = ids->roles;

	rport->reconnect_delay = i->f->reconnect_delay ?
		*i->f->reconnect_delay : 10;
	INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
	rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
		*i->f->fast_io_fail_tmo : 15;
	rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
	INIT_DELAYED_WORK(&rport->fast_io_fail_work,
			  rport_fast_io_fail_timedout);
	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

	transport_setup_device(&rport->dev);

	ret = device_add(&rport->dev);
	if (ret) {
		transport_destroy_device(&rport->dev);
		put_device(&rport->dev);
		return ERR_PTR(ret);
	}

	transport_add_device(&rport->dev);
	transport_configure_device(&rport->dev);

	return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
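
/*
 * Typical creation sequence in an SRP initiator driver (a sketch; the source
 * of the 16-byte identifier is a placeholder):
 *
 *	struct srp_rport_identifiers ids;
 *	struct srp_rport *rport;
 *
 *	memcpy(ids.port_id, target_port_id, sizeof(ids.port_id));
 *	ids.roles = SRP_RPORT_ROLE_TARGET;
 *	rport = srp_rport_add(shost, &ids);
 *	if (IS_ERR(rport))
 *		return PTR_ERR(rport);
 */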

/**
 * srp_rport_del  -  remove a SRP remote port
 * @rport:	SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
	struct device *dev = &rport->dev;

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
	if (scsi_is_srp_rport(dev))
		srp_rport_del(dev_to_rport(dev));
	return 0;
}

/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	srp_rport_set_state(rport, SRP_RPORT_LOST);
	mutex_unlock(&rport->mutex);

	cancel_delayed_work_sync(&rport->reconnect_work);
	cancel_delayed_work_sync(&rport->fast_io_fail_work);
	cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
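
/*
 * Taken together with the kernel-doc above, this suggests the following
 * teardown order in an LLD (a sketch; the extra rport reference is assumed
 * to have been taken earlier with srp_rport_get()):
 *
 *	srp_remove_host(shost);
 *	scsi_remove_host(shost);
 *	srp_stop_rport_timers(rport);
 *	srp_rport_put(rport);
 */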

static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
				 int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}

static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->it_nexus_response(shost, nexus, result);
}

/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:		SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
	int count;
	struct srp_internal *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->t.eh_timed_out = srp_timed_out;

	i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
	i->t.it_nexus_response = srp_it_nexus_response;

	i->t.host_size = sizeof(struct srp_host_attrs);
	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &srp_host_class.class;
	i->t.host_attrs.ac.match = srp_host_match;
	i->host_attrs[0] = NULL;
	transport_container_register(&i->t.host_attrs);

	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
	i->rport_attr_cont.ac.class = &srp_rport_class.class;
	i->rport_attr_cont.ac.match = srp_rport_match;

	count = 0;
	i->rport_attrs[count++] = &dev_attr_port_id;
	i->rport_attrs[count++] = &dev_attr_roles;
	if (ft->has_rport_state) {
		i->rport_attrs[count++] = &dev_attr_state;
		i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
		i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
	}
	if (ft->reconnect) {
		i->rport_attrs[count++] = &dev_attr_reconnect_delay;
		i->rport_attrs[count++] = &dev_attr_failed_reconnects;
	}
	if (ft->rport_delete)
		i->rport_attrs[count++] = &dev_attr_delete;
	i->rport_attrs[count++] = NULL;
	BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

	transport_container_register(&i->rport_attr_cont);

	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
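
/*
 * A driver instantiates this template once at load time. A sketch, with all
 * lld_* names hypothetical:
 *
 *	static struct srp_function_template lld_srp_functions = {
 *		.has_rport_state	= true,
 *		.reset_timer_if_blocked	= true,
 *		.reconnect		= lld_rport_reconnect,
 *		.rport_delete		= lld_rport_delete,
 *	};
 *
 *	lld_template = srp_attach_transport(&lld_srp_functions);
 *	if (!lld_template)
 *		return -ENOMEM;
 */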

/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:		transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
	struct srp_internal *i = to_srp_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
	int ret;

	ret = transport_class_register(&srp_host_class);
	if (ret)
		return ret;
	ret = transport_class_register(&srp_rport_class);
	if (ret)
		goto unregister_host_class;

	return 0;
unregister_host_class:
	transport_class_unregister(&srp_host_class);
	return ret;
}

static void __exit srp_transport_exit(void)
{
	transport_class_unregister(&srp_host_class);
	transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);