2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
59 #include <scsi/libsas.h>
60 #include "remote_device.h"
61 #include "remote_node_context.h"
69 * isci_task_refuse() - complete the request to the upper layer driver in
70 * the case where an I/O needs to be completed back in the submit path.
71 * @ihost: host on which the the request was queued
72 * @task: request to complete
73 * @response: response code for the completed task.
74 * @status: status code for the completed task.
77 static void isci_task_refuse(struct isci_host
*ihost
, struct sas_task
*task
,
78 enum service_response response
,
79 enum exec_status status
)
82 enum isci_completion_selection disposition
;
84 disposition
= isci_perform_normal_io_completion
;
85 disposition
= isci_task_set_completion_status(task
, response
, status
,
88 /* Tasks aborted specifically by a call to the lldd_abort_task
89 * function should not be completed to the host in the regular path.
91 switch (disposition
) {
92 case isci_perform_normal_io_completion
:
93 /* Normal notification (task_done) */
94 dev_dbg(&ihost
->pdev
->dev
,
95 "%s: Normal - task = %p, response=%d, "
97 __func__
, task
, response
, status
);
99 task
->lldd_task
= NULL
;
101 isci_execpath_callback(ihost
, task
, task
->task_done
);
104 case isci_perform_aborted_io_completion
:
105 /* No notification because this request is already in the
108 dev_warn(&ihost
->pdev
->dev
,
109 "%s: Aborted - task = %p, response=%d, "
111 __func__
, task
, response
, status
);
114 case isci_perform_error_io_completion
:
115 /* Use sas_task_abort */
116 dev_warn(&ihost
->pdev
->dev
,
117 "%s: Error - task = %p, response=%d, "
119 __func__
, task
, response
, status
);
121 isci_execpath_callback(ihost
, task
, sas_task_abort
);
125 dev_warn(&ihost
->pdev
->dev
,
126 "%s: isci task notification default case!",
128 sas_task_abort(task
);
/* Iterate 'num' sas_tasks that are chained through task->list.
 * Advances 'task' to the next list entry on every pass.  Both macro
 * arguments are evaluated multiple times, so callers must pass plain
 * lvalues (no side effects).
 */
#define for_each_sas_task(num, task) \
	for (; num > 0; num--,\
	     task = list_entry(task->list.next, struct sas_task, list))
138 static inline int isci_device_io_ready(struct isci_remote_device
*idev
,
139 struct sas_task
*task
)
141 return idev
? test_bit(IDEV_IO_READY
, &idev
->flags
) ||
142 (test_bit(IDEV_IO_NCQERROR
, &idev
->flags
) &&
143 isci_task_is_ncq_recovery(task
))
147 * isci_task_execute_task() - This function is one of the SAS Domain Template
148 * functions. This function is called by libsas to send a task down to
150 * @task: This parameter specifies the SAS task to send.
151 * @num: This parameter specifies the number of tasks to queue.
152 * @gfp_flags: This parameter specifies the context of this call.
154 * status, zero indicates success.
156 int isci_task_execute_task(struct sas_task
*task
, int num
, gfp_t gfp_flags
)
158 struct isci_host
*ihost
= dev_to_ihost(task
->dev
);
159 struct isci_remote_device
*idev
;
164 dev_dbg(&ihost
->pdev
->dev
, "%s: num=%d\n", __func__
, num
);
166 for_each_sas_task(num
, task
) {
167 enum sci_status status
= SCI_FAILURE
;
169 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
170 idev
= isci_lookup_device(task
->dev
);
171 io_ready
= isci_device_io_ready(idev
, task
);
172 tag
= isci_alloc_tag(ihost
);
173 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
175 dev_dbg(&ihost
->pdev
->dev
,
176 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
177 task
, num
, task
->dev
, idev
, idev
? idev
->flags
: 0,
181 isci_task_refuse(ihost
, task
, SAS_TASK_UNDELIVERED
,
183 } else if (!io_ready
|| tag
== SCI_CONTROLLER_INVALID_IO_TAG
) {
184 /* Indicate QUEUE_FULL so that the scsi midlayer
187 isci_task_refuse(ihost
, task
, SAS_TASK_COMPLETE
,
190 /* There is a device and it's ready for I/O. */
191 spin_lock_irqsave(&task
->task_state_lock
, flags
);
193 if (task
->task_state_flags
& SAS_TASK_STATE_ABORTED
) {
194 /* The I/O was aborted. */
195 spin_unlock_irqrestore(&task
->task_state_lock
,
198 isci_task_refuse(ihost
, task
,
199 SAS_TASK_UNDELIVERED
,
200 SAM_STAT_TASK_ABORTED
);
202 task
->task_state_flags
|= SAS_TASK_AT_INITIATOR
;
203 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
205 /* build and send the request. */
206 status
= isci_request_execute(ihost
, idev
, task
, tag
);
208 if (status
!= SCI_SUCCESS
) {
210 spin_lock_irqsave(&task
->task_state_lock
, flags
);
211 /* Did not really start this command. */
212 task
->task_state_flags
&= ~SAS_TASK_AT_INITIATOR
;
213 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
215 /* Indicate QUEUE_FULL so that the scsi
216 * midlayer retries. if the request
217 * failed for remote device reasons,
218 * it gets returned as
219 * SAS_TASK_UNDELIVERED next time
222 isci_task_refuse(ihost
, task
,
228 if (status
!= SCI_SUCCESS
&& tag
!= SCI_CONTROLLER_INVALID_IO_TAG
) {
229 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
230 /* command never hit the device, so just free
231 * the tci and skip the sequence increment
233 isci_tci_free(ihost
, ISCI_TAG_TCI(tag
));
234 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
236 isci_put_device(idev
);
241 static struct isci_request
*isci_task_request_build(struct isci_host
*ihost
,
242 struct isci_remote_device
*idev
,
243 u16 tag
, struct isci_tmf
*isci_tmf
)
245 enum sci_status status
= SCI_FAILURE
;
246 struct isci_request
*ireq
= NULL
;
247 struct domain_device
*dev
;
249 dev_dbg(&ihost
->pdev
->dev
,
250 "%s: isci_tmf = %p\n", __func__
, isci_tmf
);
252 dev
= idev
->domain_dev
;
254 /* do common allocation and init of request object. */
255 ireq
= isci_tmf_request_from_tag(ihost
, isci_tmf
, tag
);
259 /* let the core do it's construct. */
260 status
= sci_task_request_construct(ihost
, idev
, tag
,
263 if (status
!= SCI_SUCCESS
) {
264 dev_warn(&ihost
->pdev
->dev
,
265 "%s: sci_task_request_construct failed - "
272 /* XXX convert to get this from task->tproto like other drivers */
273 if (dev
->dev_type
== SAS_END_DEV
) {
274 isci_tmf
->proto
= SAS_PROTOCOL_SSP
;
275 status
= sci_task_request_construct_ssp(ireq
);
276 if (status
!= SCI_SUCCESS
)
280 if (dev
->dev_type
== SATA_DEV
|| (dev
->tproto
& SAS_PROTOCOL_STP
)) {
281 isci_tmf
->proto
= SAS_PROTOCOL_SATA
;
282 status
= isci_sata_management_task_request_build(ireq
);
284 if (status
!= SCI_SUCCESS
)
290 int isci_task_execute_tmf(struct isci_host
*ihost
,
291 struct isci_remote_device
*idev
,
292 struct isci_tmf
*tmf
, unsigned long timeout_ms
)
294 DECLARE_COMPLETION_ONSTACK(completion
);
295 enum sci_task_status status
= SCI_TASK_FAILURE
;
296 struct isci_request
*ireq
;
297 int ret
= TMF_RESP_FUNC_FAILED
;
299 unsigned long timeleft
;
302 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
303 tag
= isci_alloc_tag(ihost
);
304 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
306 if (tag
== SCI_CONTROLLER_INVALID_IO_TAG
)
309 /* sanity check, return TMF_RESP_FUNC_FAILED
310 * if the device is not there and ready.
313 (!test_bit(IDEV_IO_READY
, &idev
->flags
) &&
314 !test_bit(IDEV_IO_NCQERROR
, &idev
->flags
))) {
315 dev_dbg(&ihost
->pdev
->dev
,
316 "%s: idev = %p not ready (%#lx)\n",
318 idev
, idev
? idev
->flags
: 0);
321 dev_dbg(&ihost
->pdev
->dev
,
325 /* Assign the pointer to the TMF's completion kernel wait structure. */
326 tmf
->complete
= &completion
;
328 ireq
= isci_task_request_build(ihost
, idev
, tag
, tmf
);
332 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
334 /* start the TMF io. */
335 status
= sci_controller_start_task(ihost
, idev
, ireq
);
337 if (status
!= SCI_TASK_SUCCESS
) {
338 dev_warn(&ihost
->pdev
->dev
,
339 "%s: start_io failed - status = 0x%x, request = %p\n",
343 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
347 if (tmf
->cb_state_func
!= NULL
)
348 tmf
->cb_state_func(isci_tmf_started
, tmf
, tmf
->cb_data
);
350 isci_request_change_state(ireq
, started
);
352 /* add the request to the remote device request list. */
353 list_add(&ireq
->dev_node
, &idev
->reqs_in_process
);
355 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
357 /* Wait for the TMF to complete, or a timeout. */
358 timeleft
= wait_for_completion_timeout(&completion
,
359 msecs_to_jiffies(timeout_ms
));
362 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
364 if (tmf
->cb_state_func
!= NULL
)
365 tmf
->cb_state_func(isci_tmf_timed_out
, tmf
, tmf
->cb_data
);
367 sci_controller_terminate_request(ihost
,
371 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
373 wait_for_completion(tmf
->complete
);
378 if (tmf
->status
== SCI_SUCCESS
)
379 ret
= TMF_RESP_FUNC_COMPLETE
;
380 else if (tmf
->status
== SCI_FAILURE_IO_RESPONSE_VALID
) {
381 dev_dbg(&ihost
->pdev
->dev
,
383 "SCI_FAILURE_IO_RESPONSE_VALID\n",
385 ret
= TMF_RESP_FUNC_COMPLETE
;
387 /* Else - leave the default "failed" status alone. */
389 dev_dbg(&ihost
->pdev
->dev
,
390 "%s: completed request = %p\n",
397 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
398 isci_tci_free(ihost
, ISCI_TAG_TCI(tag
));
399 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
404 void isci_task_build_tmf(
405 struct isci_tmf
*tmf
,
406 enum isci_tmf_function_codes code
,
407 void (*tmf_sent_cb
)(enum isci_tmf_cb_state
,
412 memset(tmf
, 0, sizeof(*tmf
));
414 tmf
->tmf_code
= code
;
415 tmf
->cb_state_func
= tmf_sent_cb
;
416 tmf
->cb_data
= cb_data
;
419 static void isci_task_build_abort_task_tmf(
420 struct isci_tmf
*tmf
,
421 enum isci_tmf_function_codes code
,
422 void (*tmf_sent_cb
)(enum isci_tmf_cb_state
,
425 struct isci_request
*old_request
)
427 isci_task_build_tmf(tmf
, code
, tmf_sent_cb
,
428 (void *)old_request
);
429 tmf
->io_tag
= old_request
->io_tag
;
433 * isci_task_validate_request_to_abort() - This function checks the given I/O
434 * against the "started" state. If the request is still "started", it's
435 * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
436 * BEFORE CALLING THIS FUNCTION.
437 * @isci_request: This parameter specifies the request object to control.
438 * @isci_host: This parameter specifies the ISCI host object
439 * @isci_device: This is the device to which the request is pending.
440 * @aborted_io_completion: This is a completion structure that will be added to
441 * the request in case it is changed to aborting; this completion is
442 * triggered when the request is fully completed.
444 * Either "started" on successful change of the task status to "aborted", or
445 * "unallocated" if the task cannot be controlled.
447 static enum isci_request_status
isci_task_validate_request_to_abort(
448 struct isci_request
*isci_request
,
449 struct isci_host
*isci_host
,
450 struct isci_remote_device
*isci_device
,
451 struct completion
*aborted_io_completion
)
453 enum isci_request_status old_state
= unallocated
;
455 /* Only abort the task if it's in the
456 * device's request_in_process list
458 if (isci_request
&& !list_empty(&isci_request
->dev_node
)) {
459 old_state
= isci_request_change_started_to_aborted(
460 isci_request
, aborted_io_completion
);
468 * isci_request_cleanup_completed_loiterer() - This function will take care of
469 * the final cleanup on any request which has been explicitly terminated.
470 * @isci_host: This parameter specifies the ISCI host object
471 * @isci_device: This is the device to which the request is pending.
472 * @isci_request: This parameter specifies the terminated request object.
473 * @task: This parameter is the libsas I/O request.
475 static void isci_request_cleanup_completed_loiterer(
476 struct isci_host
*isci_host
,
477 struct isci_remote_device
*isci_device
,
478 struct isci_request
*isci_request
,
479 struct sas_task
*task
)
483 dev_dbg(&isci_host
->pdev
->dev
,
484 "%s: isci_device=%p, request=%p, task=%p\n",
485 __func__
, isci_device
, isci_request
, task
);
489 spin_lock_irqsave(&task
->task_state_lock
, flags
);
490 task
->lldd_task
= NULL
;
492 task
->task_state_flags
&= ~SAS_TASK_NEED_DEV_RESET
;
494 isci_set_task_doneflags(task
);
496 /* If this task is not in the abort path, call task_done. */
497 if (!(task
->task_state_flags
& SAS_TASK_STATE_ABORTED
)) {
499 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
500 task
->task_done(task
);
502 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
505 if (isci_request
!= NULL
) {
506 spin_lock_irqsave(&isci_host
->scic_lock
, flags
);
507 list_del_init(&isci_request
->dev_node
);
508 spin_unlock_irqrestore(&isci_host
->scic_lock
, flags
);
513 * isci_terminate_request_core() - This function will terminate the given
514 * request, and wait for it to complete. This function must only be called
515 * from a thread that can wait. Note that the request is terminated and
516 * completed (back to the host, if started there).
519 * @isci_request: The I/O request to be terminated.
522 static void isci_terminate_request_core(struct isci_host
*ihost
,
523 struct isci_remote_device
*idev
,
524 struct isci_request
*isci_request
)
526 enum sci_status status
= SCI_SUCCESS
;
527 bool was_terminated
= false;
528 bool needs_cleanup_handling
= false;
529 enum isci_request_status request_status
;
531 unsigned long termination_completed
= 1;
532 struct completion
*io_request_completion
;
533 struct sas_task
*task
;
535 dev_dbg(&ihost
->pdev
->dev
,
536 "%s: device = %p; request = %p\n",
537 __func__
, idev
, isci_request
);
539 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
541 io_request_completion
= isci_request
->io_request_completion
;
543 task
= (isci_request
->ttype
== io_task
)
544 ? isci_request_access_task(isci_request
)
547 /* Note that we are not going to control
548 * the target to abort the request.
550 set_bit(IREQ_COMPLETE_IN_TARGET
, &isci_request
->flags
);
552 /* Make sure the request wasn't just sitting around signalling
553 * device condition (if the request handle is NULL, then the
554 * request completed but needed additional handling here).
556 if (!test_bit(IREQ_TERMINATED
, &isci_request
->flags
)) {
557 was_terminated
= true;
558 needs_cleanup_handling
= true;
559 status
= sci_controller_terminate_request(ihost
,
563 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
566 * The only time the request to terminate will
567 * fail is when the io request is completed and
570 if (status
!= SCI_SUCCESS
) {
571 dev_err(&ihost
->pdev
->dev
,
572 "%s: sci_controller_terminate_request"
573 " returned = 0x%x\n",
576 isci_request
->io_request_completion
= NULL
;
579 if (was_terminated
) {
580 dev_dbg(&ihost
->pdev
->dev
,
581 "%s: before completion wait (%p/%p)\n",
582 __func__
, isci_request
, io_request_completion
);
584 /* Wait here for the request to complete. */
585 #define TERMINATION_TIMEOUT_MSEC 500
586 termination_completed
587 = wait_for_completion_timeout(
588 io_request_completion
,
589 msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC
));
591 if (!termination_completed
) {
593 /* The request to terminate has timed out. */
594 spin_lock_irqsave(&ihost
->scic_lock
,
597 /* Check for state changes. */
598 if (!test_bit(IREQ_TERMINATED
, &isci_request
->flags
)) {
600 /* The best we can do is to have the
601 * request die a silent death if it
602 * ever really completes.
604 * Set the request state to "dead",
605 * and clear the task pointer so that
606 * an actual completion event callback
607 * doesn't do anything.
609 isci_request
->status
= dead
;
610 isci_request
->io_request_completion
613 if (isci_request
->ttype
== io_task
) {
615 /* Break links with the
618 isci_request
->ttype_ptr
.io_task_ptr
622 termination_completed
= 1;
624 spin_unlock_irqrestore(&ihost
->scic_lock
,
627 if (!termination_completed
) {
629 dev_err(&ihost
->pdev
->dev
,
630 "%s: *** Timeout waiting for "
631 "termination(%p/%p)\n",
632 __func__
, io_request_completion
,
635 /* The request can no longer be referenced
636 * safely since it may go away if the
637 * termination every really does complete.
642 if (termination_completed
)
643 dev_dbg(&ihost
->pdev
->dev
,
644 "%s: after completion wait (%p/%p)\n",
645 __func__
, isci_request
, io_request_completion
);
648 if (termination_completed
) {
650 isci_request
->io_request_completion
= NULL
;
652 /* Peek at the status of the request. This will tell
653 * us if there was special handling on the request such that it
654 * needs to be detached and freed here.
656 spin_lock_irqsave(&isci_request
->state_lock
, flags
);
657 request_status
= isci_request_get_state(isci_request
);
659 if ((isci_request
->ttype
== io_task
) /* TMFs are in their own thread */
660 && ((request_status
== aborted
)
661 || (request_status
== aborting
)
662 || (request_status
== terminating
)
663 || (request_status
== completed
)
664 || (request_status
== dead
)
668 /* The completion routine won't free a request in
669 * the aborted/aborting/etc. states, so we do
672 needs_cleanup_handling
= true;
674 spin_unlock_irqrestore(&isci_request
->state_lock
, flags
);
677 if (needs_cleanup_handling
)
678 isci_request_cleanup_completed_loiterer(
679 ihost
, idev
, isci_request
, task
);
684 * isci_terminate_pending_requests() - This function will change the all of the
685 * requests on the given device's state to "aborting", will terminate the
686 * requests, and wait for them to complete. This function must only be
687 * called from a thread that can wait. Note that the requests are all
688 * terminated and completed (back to the host, if started there).
689 * @isci_host: This parameter specifies SCU.
690 * @idev: This parameter specifies the target.
693 void isci_terminate_pending_requests(struct isci_host
*ihost
,
694 struct isci_remote_device
*idev
)
696 struct completion request_completion
;
697 enum isci_request_status old_state
;
701 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
702 list_splice_init(&idev
->reqs_in_process
, &list
);
704 /* assumes that isci_terminate_request_core deletes from the list */
705 while (!list_empty(&list
)) {
706 struct isci_request
*ireq
= list_entry(list
.next
, typeof(*ireq
), dev_node
);
708 /* Change state to "terminating" if it is currently
711 old_state
= isci_request_change_started_to_newstate(ireq
,
720 /* termination in progress, or otherwise dispositioned.
721 * We know the request was on 'list' so should be safe
722 * to move it back to reqs_in_process
724 list_move(&ireq
->dev_node
, &idev
->reqs_in_process
);
731 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
733 init_completion(&request_completion
);
735 dev_dbg(&ihost
->pdev
->dev
,
736 "%s: idev=%p request=%p; task=%p old_state=%d\n",
737 __func__
, idev
, ireq
,
738 ireq
->ttype
== io_task
? isci_request_access_task(ireq
) : NULL
,
741 /* If the old_state is started:
742 * This request was not already being aborted. If it had been,
743 * then the aborting I/O (ie. the TMF request) would not be in
744 * the aborting state, and thus would be terminated here. Note
745 * that since the TMF completion's call to the kernel function
746 * "complete()" does not happen until the pending I/O request
747 * terminate fully completes, we do not have to implement a
748 * special wait here for already aborting requests - the
749 * termination of the TMF request will force the request
750 * to finish it's already started terminate.
752 * If old_state == completed:
753 * This request completed from the SCU hardware perspective
754 * and now just needs cleaning up in terms of freeing the
755 * request and potentially calling up to libsas.
757 * If old_state == aborting:
758 * This request has already gone through a TMF timeout, but may
759 * not have been terminated; needs cleaning up at least.
761 isci_terminate_request_core(ihost
, idev
, ireq
);
762 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
764 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
768 * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
769 * Template functions.
770 * @lun: This parameter specifies the lun to be reset.
772 * status, zero indicates success.
774 static int isci_task_send_lu_reset_sas(
775 struct isci_host
*isci_host
,
776 struct isci_remote_device
*isci_device
,
780 int ret
= TMF_RESP_FUNC_FAILED
;
782 dev_dbg(&isci_host
->pdev
->dev
,
783 "%s: isci_host = %p, isci_device = %p\n",
784 __func__
, isci_host
, isci_device
);
785 /* Send the LUN reset to the target. By the time the call returns,
786 * the TMF has fully exected in the target (in which case the return
787 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
788 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
790 isci_task_build_tmf(&tmf
, isci_tmf_ssp_lun_reset
, NULL
, NULL
);
792 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
793 ret
= isci_task_execute_tmf(isci_host
, isci_device
, &tmf
, ISCI_LU_RESET_TIMEOUT_MS
);
795 if (ret
== TMF_RESP_FUNC_COMPLETE
)
796 dev_dbg(&isci_host
->pdev
->dev
,
797 "%s: %p: TMF_LU_RESET passed\n",
798 __func__
, isci_device
);
800 dev_dbg(&isci_host
->pdev
->dev
,
801 "%s: %p: TMF_LU_RESET failed (%x)\n",
802 __func__
, isci_device
, ret
);
808 * isci_task_lu_reset() - This function is one of the SAS Domain Template
809 * functions. This is one of the Task Management functoins called by libsas,
810 * to reset the given lun. Note the assumption that while this call is
811 * executing, no I/O will be sent by the host to the device.
812 * @lun: This parameter specifies the lun to be reset.
814 * status, zero indicates success.
816 int isci_task_lu_reset(struct domain_device
*domain_device
, u8
*lun
)
818 struct isci_host
*isci_host
= dev_to_ihost(domain_device
);
819 struct isci_remote_device
*isci_device
;
823 spin_lock_irqsave(&isci_host
->scic_lock
, flags
);
824 isci_device
= isci_lookup_device(domain_device
);
825 spin_unlock_irqrestore(&isci_host
->scic_lock
, flags
);
827 dev_dbg(&isci_host
->pdev
->dev
,
828 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
829 __func__
, domain_device
, isci_host
, isci_device
);
832 set_bit(IDEV_EH
, &isci_device
->flags
);
834 /* If there is a device reset pending on any request in the
835 * device's list, fail this LUN reset request in order to
836 * escalate to the device reset.
839 isci_device_is_reset_pending(isci_host
, isci_device
)) {
840 dev_warn(&isci_host
->pdev
->dev
,
841 "%s: No dev (%p), or "
842 "RESET PENDING: domain_device=%p\n",
843 __func__
, isci_device
, domain_device
);
844 ret
= TMF_RESP_FUNC_FAILED
;
848 /* Send the task management part of the reset. */
849 if (sas_protocol_ata(domain_device
->tproto
)) {
850 ret
= isci_task_send_lu_reset_sata(isci_host
, isci_device
, lun
);
852 ret
= isci_task_send_lu_reset_sas(isci_host
, isci_device
, lun
);
854 /* If the LUN reset worked, all the I/O can now be terminated. */
855 if (ret
== TMF_RESP_FUNC_COMPLETE
)
856 /* Terminate all I/O now. */
857 isci_terminate_pending_requests(isci_host
,
861 isci_put_device(isci_device
);
866 /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
867 int isci_task_clear_nexus_port(struct asd_sas_port
*port
)
869 return TMF_RESP_FUNC_FAILED
;
874 int isci_task_clear_nexus_ha(struct sas_ha_struct
*ha
)
876 return TMF_RESP_FUNC_FAILED
;
879 /* Task Management Functions. Must be called from process context. */
882 * isci_abort_task_process_cb() - This is a helper function for the abort task
883 * TMF command. It manages the request state with respect to the successful
884 * transmission / completion of the abort task request.
885 * @cb_state: This parameter specifies when this function was called - after
886 * the TMF request has been started and after it has timed-out.
887 * @tmf: This parameter specifies the TMF in progress.
891 static void isci_abort_task_process_cb(
892 enum isci_tmf_cb_state cb_state
,
893 struct isci_tmf
*tmf
,
896 struct isci_request
*old_request
;
898 old_request
= (struct isci_request
*)cb_data
;
900 dev_dbg(&old_request
->isci_host
->pdev
->dev
,
901 "%s: tmf=%p, old_request=%p\n",
902 __func__
, tmf
, old_request
);
906 case isci_tmf_started
:
907 /* The TMF has been started. Nothing to do here, since the
908 * request state was already set to "aborted" by the abort
911 if ((old_request
->status
!= aborted
)
912 && (old_request
->status
!= completed
))
913 dev_err(&old_request
->isci_host
->pdev
->dev
,
914 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
915 __func__
, old_request
->status
, tmf
, old_request
);
918 case isci_tmf_timed_out
:
920 /* Set the task's state to "aborting", since the abort task
921 * function thread set it to "aborted" (above) in anticipation
922 * of the task management request working correctly. Since the
923 * timeout has now fired, the TMF request failed. We set the
924 * state such that the request completion will indicate the
925 * device is no longer present.
927 isci_request_change_state(old_request
, aborting
);
931 dev_err(&old_request
->isci_host
->pdev
->dev
,
932 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
933 __func__
, cb_state
, tmf
, old_request
);
939 * isci_task_abort_task() - This function is one of the SAS Domain Template
940 * functions. This function is called by libsas to abort a specified task.
941 * @task: This parameter specifies the SAS task to abort.
943 * status, zero indicates success.
945 int isci_task_abort_task(struct sas_task
*task
)
947 struct isci_host
*isci_host
= dev_to_ihost(task
->dev
);
948 DECLARE_COMPLETION_ONSTACK(aborted_io_completion
);
949 struct isci_request
*old_request
= NULL
;
950 enum isci_request_status old_state
;
951 struct isci_remote_device
*isci_device
= NULL
;
953 int ret
= TMF_RESP_FUNC_FAILED
;
955 bool any_dev_reset
= false;
957 /* Get the isci_request reference from the task. Note that
958 * this check does not depend on the pending request list
959 * in the device, because tasks driving resets may land here
960 * after completion in the core.
962 spin_lock_irqsave(&isci_host
->scic_lock
, flags
);
963 spin_lock(&task
->task_state_lock
);
965 old_request
= task
->lldd_task
;
967 /* If task is already done, the request isn't valid */
968 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
) &&
969 (task
->task_state_flags
& SAS_TASK_AT_INITIATOR
) &&
971 isci_device
= isci_lookup_device(task
->dev
);
973 spin_unlock(&task
->task_state_lock
);
974 spin_unlock_irqrestore(&isci_host
->scic_lock
, flags
);
976 dev_dbg(&isci_host
->pdev
->dev
,
977 "%s: task = %p\n", __func__
, task
);
979 if (!isci_device
|| !old_request
)
982 set_bit(IDEV_EH
, &isci_device
->flags
);
984 /* This version of the driver will fail abort requests for
985 * SATA/STP. Failing the abort request this way will cause the
986 * SCSI error handler thread to escalate to LUN reset
988 if (sas_protocol_ata(task
->task_proto
)) {
989 dev_warn(&isci_host
->pdev
->dev
,
990 " task %p is for a STP/SATA device;"
991 " returning TMF_RESP_FUNC_FAILED\n"
992 " to cause a LUN reset...\n", task
);
996 dev_dbg(&isci_host
->pdev
->dev
,
997 "%s: old_request == %p\n", __func__
, old_request
);
999 any_dev_reset
= isci_device_is_reset_pending(isci_host
,isci_device
);
1001 spin_lock_irqsave(&task
->task_state_lock
, flags
);
1003 any_dev_reset
= any_dev_reset
|| (task
->task_state_flags
& SAS_TASK_NEED_DEV_RESET
);
1005 /* If the extraction of the request reference from the task
1006 * failed, then the request has been completed (or if there is a
1007 * pending reset then this abort request function must be failed
1008 * in order to escalate to the target reset).
1010 if ((old_request
== NULL
) || any_dev_reset
) {
1012 /* If the device reset task flag is set, fail the task
1013 * management request. Otherwise, the original request
1016 if (any_dev_reset
) {
1018 /* Turn off the task's DONE to make sure this
1019 * task is escalated to a target reset.
1021 task
->task_state_flags
&= ~SAS_TASK_STATE_DONE
;
1023 /* Make the reset happen as soon as possible. */
1024 task
->task_state_flags
|= SAS_TASK_NEED_DEV_RESET
;
1026 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1028 /* Fail the task management request in order to
1029 * escalate to the target reset.
1031 ret
= TMF_RESP_FUNC_FAILED
;
1033 dev_dbg(&isci_host
->pdev
->dev
,
1034 "%s: Failing task abort in order to "
1035 "escalate to target reset because\n"
1036 "SAS_TASK_NEED_DEV_RESET is set for "
1037 "task %p on dev %p\n",
1038 __func__
, task
, isci_device
);
1042 /* The request has already completed and there
1043 * is nothing to do here other than to set the task
1044 * done bit, and indicate that the task abort function
1047 isci_set_task_doneflags(task
);
1049 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1051 ret
= TMF_RESP_FUNC_COMPLETE
;
1053 dev_dbg(&isci_host
->pdev
->dev
,
1054 "%s: abort task not needed for %p\n",
1060 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1062 spin_lock_irqsave(&isci_host
->scic_lock
, flags
);
1064 /* Check the request status and change to "aborted" if currently
1065 * "starting"; if true then set the I/O kernel completion
1066 * struct that will be triggered when the request completes.
1068 old_state
= isci_task_validate_request_to_abort(
1069 old_request
, isci_host
, isci_device
,
1070 &aborted_io_completion
);
1071 if ((old_state
!= started
) &&
1072 (old_state
!= completed
) &&
1073 (old_state
!= aborting
)) {
1075 spin_unlock_irqrestore(&isci_host
->scic_lock
, flags
);
1077 /* The request was already being handled by someone else (because
1078 * they got to set the state away from started).
1080 dev_dbg(&isci_host
->pdev
->dev
,
1081 "%s: device = %p; old_request %p already being aborted\n",
1083 isci_device
, old_request
);
1084 ret
= TMF_RESP_FUNC_COMPLETE
;
1087 if (task
->task_proto
== SAS_PROTOCOL_SMP
||
1088 test_bit(IREQ_COMPLETE_IN_TARGET
, &old_request
->flags
)) {
1090 spin_unlock_irqrestore(&isci_host
->scic_lock
, flags
);
1092 dev_dbg(&isci_host
->pdev
->dev
,
1093 "%s: SMP request (%d)"
1094 " or complete_in_target (%d), thus no TMF\n",
1095 __func__
, (task
->task_proto
== SAS_PROTOCOL_SMP
),
1096 test_bit(IREQ_COMPLETE_IN_TARGET
, &old_request
->flags
));
1098 /* Set the state on the task. */
1099 isci_task_all_done(task
);
1101 ret
= TMF_RESP_FUNC_COMPLETE
;
1103 /* Stopping and SMP devices are not sent a TMF, and are not
1104 * reset, but the outstanding I/O request is terminated below.
1107 /* Fill in the tmf stucture */
1108 isci_task_build_abort_task_tmf(&tmf
, isci_tmf_ssp_task_abort
,
1109 isci_abort_task_process_cb
,
1112 spin_unlock_irqrestore(&isci_host
->scic_lock
, flags
);
1114 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1115 ret
= isci_task_execute_tmf(isci_host
, isci_device
, &tmf
,
1116 ISCI_ABORT_TASK_TIMEOUT_MS
);
1118 if (ret
!= TMF_RESP_FUNC_COMPLETE
)
1119 dev_err(&isci_host
->pdev
->dev
,
1120 "%s: isci_task_send_tmf failed\n",
1123 if (ret
== TMF_RESP_FUNC_COMPLETE
) {
1124 set_bit(IREQ_COMPLETE_IN_TARGET
, &old_request
->flags
);
1126 /* Clean up the request on our side, and wait for the aborted
1129 isci_terminate_request_core(isci_host
, isci_device
, old_request
);
1132 /* Make sure we do not leave a reference to aborted_io_completion */
1133 old_request
->io_request_completion
= NULL
;
1135 isci_put_device(isci_device
);
1140 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1141 * functions. This is one of the Task Management functoins called by libsas,
1142 * to abort all task for the given lun.
1143 * @d_device: This parameter specifies the domain device associated with this
1145 * @lun: This parameter specifies the lun associated with this request.
1147 * status, zero indicates success.
1149 int isci_task_abort_task_set(
1150 struct domain_device
*d_device
,
1153 return TMF_RESP_FUNC_FAILED
;
1158 * isci_task_clear_aca() - This function is one of the SAS Domain Template
1159 * functions. This is one of the Task Management functoins called by libsas.
1160 * @d_device: This parameter specifies the domain device associated with this
1162 * @lun: This parameter specifies the lun associated with this request.
1164 * status, zero indicates success.
1166 int isci_task_clear_aca(
1167 struct domain_device
*d_device
,
1170 return TMF_RESP_FUNC_FAILED
;
1176 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1177 * functions. This is one of the Task Management functoins called by libsas.
1178 * @d_device: This parameter specifies the domain device associated with this
1180 * @lun: This parameter specifies the lun associated with this request.
1182 * status, zero indicates success.
1184 int isci_task_clear_task_set(
1185 struct domain_device
*d_device
,
1188 return TMF_RESP_FUNC_FAILED
;
1193 * isci_task_query_task() - This function is implemented to cause libsas to
1194 * correctly escalate the failed abort to a LUN or target reset (this is
1195 * because sas_scsi_find_task libsas function does not correctly interpret
1196 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
1197 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1198 * returned, libsas will turn this into a target reset
1199 * @task: This parameter specifies the sas task being queried.
1200 * @lun: This parameter specifies the lun associated with this request.
1202 * status, zero indicates success.
1204 int isci_task_query_task(
1205 struct sas_task
*task
)
1207 /* See if there is a pending device reset for this device. */
1208 if (task
->task_state_flags
& SAS_TASK_NEED_DEV_RESET
)
1209 return TMF_RESP_FUNC_FAILED
;
1211 return TMF_RESP_FUNC_SUCC
;
1215 * isci_task_request_complete() - This function is called by the sci core when
1216 * an task request completes.
1217 * @ihost: This parameter specifies the ISCI host object
1218 * @ireq: This parameter is the completed isci_request object.
1219 * @completion_status: This parameter specifies the completion status from the
1225 isci_task_request_complete(struct isci_host
*ihost
,
1226 struct isci_request
*ireq
,
1227 enum sci_task_status completion_status
)
1229 struct isci_tmf
*tmf
= isci_request_access_tmf(ireq
);
1230 struct completion
*tmf_complete
;
1232 dev_dbg(&ihost
->pdev
->dev
,
1233 "%s: request = %p, status=%d\n",
1234 __func__
, ireq
, completion_status
);
1236 isci_request_change_state(ireq
, completed
);
1238 tmf
->status
= completion_status
;
1239 set_bit(IREQ_COMPLETE_IN_TARGET
, &ireq
->flags
);
1241 if (tmf
->proto
== SAS_PROTOCOL_SSP
) {
1242 memcpy(&tmf
->resp
.resp_iu
,
1244 SSP_RESP_IU_MAX_SIZE
);
1245 } else if (tmf
->proto
== SAS_PROTOCOL_SATA
) {
1246 memcpy(&tmf
->resp
.d2h_fis
,
1248 sizeof(struct dev_to_host_fis
));
1251 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1252 tmf_complete
= tmf
->complete
;
1254 sci_controller_complete_io(ihost
, ireq
->target_device
, ireq
);
1255 /* set the 'terminated' flag handle to make sure it cannot be terminated
1256 * or completed again.
1258 set_bit(IREQ_TERMINATED
, &ireq
->flags
);
1260 isci_request_change_state(ireq
, unallocated
);
1261 list_del_init(&ireq
->dev_node
);
1263 /* The task management part completes last. */
1264 complete(tmf_complete
);
1267 static void isci_smp_task_timedout(unsigned long _task
)
1269 struct sas_task
*task
= (void *) _task
;
1270 unsigned long flags
;
1272 spin_lock_irqsave(&task
->task_state_lock
, flags
);
1273 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
))
1274 task
->task_state_flags
|= SAS_TASK_STATE_ABORTED
;
1275 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1277 complete(&task
->completion
);
1280 static void isci_smp_task_done(struct sas_task
*task
)
1282 if (!del_timer(&task
->timer
))
1284 complete(&task
->completion
);
1287 static struct sas_task
*isci_alloc_task(void)
1289 struct sas_task
*task
= kzalloc(sizeof(*task
), GFP_KERNEL
);
1292 INIT_LIST_HEAD(&task
->list
);
1293 spin_lock_init(&task
->task_state_lock
);
1294 task
->task_state_flags
= SAS_TASK_STATE_PENDING
;
1295 init_timer(&task
->timer
);
1296 init_completion(&task
->completion
);
1302 static void isci_free_task(struct isci_host
*ihost
, struct sas_task
*task
)
1305 BUG_ON(!list_empty(&task
->list
));
1310 static int isci_smp_execute_task(struct isci_host
*ihost
,
1311 struct domain_device
*dev
, void *req
,
1312 int req_size
, void *resp
, int resp_size
)
1315 struct sas_task
*task
= NULL
;
1317 for (retry
= 0; retry
< 3; retry
++) {
1318 task
= isci_alloc_task();
1323 task
->task_proto
= dev
->tproto
;
1324 sg_init_one(&task
->smp_task
.smp_req
, req
, req_size
);
1325 sg_init_one(&task
->smp_task
.smp_resp
, resp
, resp_size
);
1327 task
->task_done
= isci_smp_task_done
;
1329 task
->timer
.data
= (unsigned long) task
;
1330 task
->timer
.function
= isci_smp_task_timedout
;
1331 task
->timer
.expires
= jiffies
+ 10*HZ
;
1332 add_timer(&task
->timer
);
1334 res
= isci_task_execute_task(task
, 1, GFP_KERNEL
);
1337 del_timer(&task
->timer
);
1338 dev_err(&ihost
->pdev
->dev
,
1339 "%s: executing SMP task failed:%d\n",
1344 wait_for_completion(&task
->completion
);
1346 if ((task
->task_state_flags
& SAS_TASK_STATE_ABORTED
)) {
1347 dev_err(&ihost
->pdev
->dev
,
1348 "%s: smp task timed out or aborted\n",
1350 isci_task_abort_task(task
);
1351 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
)) {
1352 dev_err(&ihost
->pdev
->dev
,
1353 "%s: SMP task aborted and not done\n",
1358 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1359 task
->task_status
.stat
== SAM_STAT_GOOD
) {
1363 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1364 task
->task_status
.stat
== SAS_DATA_UNDERRUN
) {
1365 /* no error, but return the number of bytes of
1367 res
= task
->task_status
.residual
;
1370 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1371 task
->task_status
.stat
== SAS_DATA_OVERRUN
) {
1375 dev_err(&ihost
->pdev
->dev
,
1376 "%s: task to dev %016llx response: 0x%x "
1377 "status 0x%x\n", __func__
,
1378 SAS_ADDR(dev
->sas_addr
),
1379 task
->task_status
.resp
,
1380 task
->task_status
.stat
);
1381 isci_free_task(ihost
, task
);
1386 BUG_ON(retry
== 3 && task
!= NULL
);
1387 isci_free_task(ihost
, task
);
1391 #define DISCOVER_REQ_SIZE 16
1392 #define DISCOVER_RESP_SIZE 56
1394 int isci_smp_get_phy_attached_dev_type(struct isci_host
*ihost
,
1395 struct domain_device
*dev
,
1396 int phy_id
, int *adt
)
1398 struct smp_resp
*disc_resp
;
1402 disc_resp
= kzalloc(DISCOVER_RESP_SIZE
, GFP_KERNEL
);
1406 disc_req
= kzalloc(DISCOVER_REQ_SIZE
, GFP_KERNEL
);
1408 disc_req
[0] = SMP_REQUEST
;
1409 disc_req
[1] = SMP_DISCOVER
;
1410 disc_req
[9] = phy_id
;
1415 res
= isci_smp_execute_task(ihost
, dev
, disc_req
, DISCOVER_REQ_SIZE
,
1416 disc_resp
, DISCOVER_RESP_SIZE
);
1418 if (disc_resp
->result
!= SMP_RESP_FUNC_ACC
)
1419 res
= disc_resp
->result
;
1421 *adt
= disc_resp
->disc
.attached_dev_type
;
1429 static void isci_wait_for_smp_phy_reset(struct isci_remote_device
*idev
, int phy_num
)
1431 struct domain_device
*dev
= idev
->domain_dev
;
1432 struct isci_port
*iport
= idev
->isci_port
;
1433 struct isci_host
*ihost
= iport
->isci_host
;
1434 int res
, iteration
= 0, attached_device_type
;
1435 #define STP_WAIT_MSECS 25000
1436 unsigned long tmo
= msecs_to_jiffies(STP_WAIT_MSECS
);
1437 unsigned long deadline
= jiffies
+ tmo
;
1439 SMP_PHYWAIT_PHYDOWN
,
1442 } phy_state
= SMP_PHYWAIT_PHYDOWN
;
1444 /* While there is time, wait for the phy to go away and come back */
1445 while (time_is_after_jiffies(deadline
) && phy_state
!= SMP_PHYWAIT_DONE
) {
1446 int event
= atomic_read(&iport
->event
);
1450 tmo
= wait_event_timeout(ihost
->eventq
,
1451 event
!= atomic_read(&iport
->event
) ||
1452 !test_bit(IPORT_BCN_BLOCKED
, &iport
->flags
),
1454 /* link down, stop polling */
1455 if (!test_bit(IPORT_BCN_BLOCKED
, &iport
->flags
))
1458 dev_dbg(&ihost
->pdev
->dev
,
1459 "%s: iport %p, iteration %d,"
1460 " phase %d: time_remaining %lu, bcns = %d\n",
1461 __func__
, iport
, iteration
, phy_state
,
1462 tmo
, test_bit(IPORT_BCN_PENDING
, &iport
->flags
));
1464 res
= isci_smp_get_phy_attached_dev_type(ihost
, dev
, phy_num
,
1465 &attached_device_type
);
1466 tmo
= deadline
- jiffies
;
1469 dev_warn(&ihost
->pdev
->dev
,
1470 "%s: iteration %d, phase %d:"
1471 " SMP error=%d, time_remaining=%lu\n",
1472 __func__
, iteration
, phy_state
, res
, tmo
);
1475 dev_dbg(&ihost
->pdev
->dev
,
1476 "%s: iport %p, iteration %d,"
1477 " phase %d: time_remaining %lu, bcns = %d, "
1478 "attdevtype = %x\n",
1479 __func__
, iport
, iteration
, phy_state
,
1480 tmo
, test_bit(IPORT_BCN_PENDING
, &iport
->flags
),
1481 attached_device_type
);
1483 switch (phy_state
) {
1484 case SMP_PHYWAIT_PHYDOWN
:
1485 /* Has the device gone away? */
1486 if (!attached_device_type
)
1487 phy_state
= SMP_PHYWAIT_PHYUP
;
1491 case SMP_PHYWAIT_PHYUP
:
1492 /* Has the device come back? */
1493 if (attached_device_type
)
1494 phy_state
= SMP_PHYWAIT_DONE
;
1497 case SMP_PHYWAIT_DONE
:
1502 dev_dbg(&ihost
->pdev
->dev
, "%s: done\n", __func__
);
1505 static int isci_reset_device(struct isci_host
*ihost
,
1506 struct isci_remote_device
*idev
, int hard_reset
)
1508 struct sas_phy
*phy
= sas_find_local_phy(idev
->domain_dev
);
1509 struct isci_port
*iport
= idev
->isci_port
;
1510 enum sci_status status
;
1511 unsigned long flags
;
1514 dev_dbg(&ihost
->pdev
->dev
, "%s: idev %p\n", __func__
, idev
);
1516 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
1517 status
= sci_remote_device_reset(idev
);
1518 if (status
!= SCI_SUCCESS
) {
1519 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
1521 dev_warn(&ihost
->pdev
->dev
,
1522 "%s: sci_remote_device_reset(%p) returned %d!\n",
1523 __func__
, idev
, status
);
1525 return TMF_RESP_FUNC_FAILED
;
1527 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
1529 /* Make sure all pending requests are able to be fully terminated. */
1530 isci_device_clear_reset_pending(ihost
, idev
);
1532 /* If this is a device on an expander, disable BCN processing. */
1533 if (!scsi_is_sas_phy_local(phy
))
1534 set_bit(IPORT_BCN_BLOCKED
, &iport
->flags
);
1536 rc
= sas_phy_reset(phy
, hard_reset
);
1538 /* Terminate in-progress I/O now. */
1539 isci_remote_device_nuke_requests(ihost
, idev
);
1541 /* Since all pending TCs have been cleaned, resume the RNC. */
1542 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
1543 status
= sci_remote_device_reset_complete(idev
);
1544 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
1546 /* If this is a device on an expander, bring the phy back up. */
1547 if (!scsi_is_sas_phy_local(phy
)) {
1548 /* A phy reset will cause the device to go away then reappear.
1549 * Since libsas will take action on incoming BCNs (eg. remove
1550 * a device going through an SMP phy-control driven reset),
1551 * we need to wait until the phy comes back up before letting
1552 * discovery proceed in libsas.
1554 isci_wait_for_smp_phy_reset(idev
, phy
->number
);
1556 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
1557 isci_port_bcn_enable(ihost
, idev
->isci_port
);
1558 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
1561 if (status
!= SCI_SUCCESS
) {
1562 dev_warn(&ihost
->pdev
->dev
,
1563 "%s: sci_remote_device_reset_complete(%p) "
1564 "returned %d!\n", __func__
, idev
, status
);
1567 dev_dbg(&ihost
->pdev
->dev
, "%s: idev %p complete.\n", __func__
, idev
);
1572 int isci_task_I_T_nexus_reset(struct domain_device
*dev
)
1574 struct isci_host
*ihost
= dev_to_ihost(dev
);
1575 struct isci_remote_device
*idev
;
1576 int ret
, hard_reset
= 1;
1577 unsigned long flags
;
1579 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
1580 idev
= isci_lookup_device(dev
);
1581 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
1583 if (!idev
|| !test_bit(IDEV_EH
, &idev
->flags
)) {
1584 ret
= TMF_RESP_FUNC_COMPLETE
;
1588 if (dev
->dev_type
== SATA_DEV
|| (dev
->tproto
& SAS_PROTOCOL_STP
))
1591 ret
= isci_reset_device(ihost
, idev
, hard_reset
);
1593 isci_put_device(idev
);
1597 int isci_bus_reset_handler(struct scsi_cmnd
*cmd
)
1599 struct domain_device
*dev
= sdev_to_domain_dev(cmd
->device
);
1600 struct isci_host
*ihost
= dev_to_ihost(dev
);
1601 struct isci_remote_device
*idev
;
1602 int ret
, hard_reset
= 1;
1603 unsigned long flags
;
1605 if (dev
->dev_type
== SATA_DEV
|| (dev
->tproto
& SAS_PROTOCOL_STP
))
1608 spin_lock_irqsave(&ihost
->scic_lock
, flags
);
1609 idev
= isci_lookup_device(dev
);
1610 spin_unlock_irqrestore(&ihost
->scic_lock
, flags
);
1613 ret
= TMF_RESP_FUNC_COMPLETE
;
1617 ret
= isci_reset_device(ihost
, idev
, hard_reset
);
1619 isci_put_device(idev
);