/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
    ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
                u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                struct domain_device *device,
                int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                void *funcdata);

u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
    case ATA_CMD_FPDMA_WRITE:
    case ATA_CMD_FPDMA_READ:
    case ATA_CMD_FPDMA_RECV:
    case ATA_CMD_FPDMA_SEND:
    case ATA_CMD_NCQ_NON_DATA:
        return HISI_SAS_SATA_PROTOCOL_FPDMA;

    case ATA_CMD_DOWNLOAD_MICRO:
    case ATA_CMD_PMP_READ:
    case ATA_CMD_READ_LOG_EXT:
    case ATA_CMD_PIO_READ:
    case ATA_CMD_PIO_READ_EXT:
    case ATA_CMD_PMP_WRITE:
    case ATA_CMD_WRITE_LOG_EXT:
    case ATA_CMD_PIO_WRITE:
    case ATA_CMD_PIO_WRITE_EXT:
        return HISI_SAS_SATA_PROTOCOL_PIO;

    case ATA_CMD_DOWNLOAD_MICRO_DMA:
    case ATA_CMD_PMP_READ_DMA:
    case ATA_CMD_PMP_WRITE_DMA:
    case ATA_CMD_READ_EXT:
    case ATA_CMD_READ_LOG_DMA_EXT:
    case ATA_CMD_READ_STREAM_DMA_EXT:
    case ATA_CMD_TRUSTED_RCV_DMA:
    case ATA_CMD_TRUSTED_SND_DMA:
    case ATA_CMD_WRITE_EXT:
    case ATA_CMD_WRITE_FUA_EXT:
    case ATA_CMD_WRITE_QUEUED:
    case ATA_CMD_WRITE_LOG_DMA_EXT:
    case ATA_CMD_WRITE_STREAM_DMA_EXT:
    case ATA_CMD_ZAC_MGMT_IN:
        return HISI_SAS_SATA_PROTOCOL_DMA;

    case ATA_CMD_CHK_POWER:
    case ATA_CMD_DEV_RESET:
    case ATA_CMD_FLUSH_EXT:
    case ATA_CMD_VERIFY_EXT:
    case ATA_CMD_SET_FEATURES:
    case ATA_CMD_STANDBYNOW1:
    case ATA_CMD_ZAC_MGMT_OUT:
        return HISI_SAS_SATA_PROTOCOL_NONDATA;

        if (direction == DMA_NONE)
            return HISI_SAS_SATA_PROTOCOL_NONDATA;
        return HISI_SAS_SATA_PROTOCOL_PIO;
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
            struct hisi_sas_slot *slot)
    struct task_status_struct *ts = &task->task_status;
    struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
    struct hisi_sas_status_buffer *status_buf =
            hisi_sas_status_buf_addr_mem(slot);
    u8 *iu = &status_buf->iu[0];
    struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

    resp->frame_len = sizeof(struct dev_to_host_fis);
    memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

    ts->buf_valid_size = sizeof(*resp);
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
    struct ata_queued_cmd *qc = task->uldd_task;

    if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
        qc->tf.command == ATA_CMD_FPDMA_READ) {
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
    return device->port->ha->lldd_ha;

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
    return container_of(sas_port, struct hisi_sas_port, sas_port);
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
    for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
        hisi_hba->hw->phy_disable(hisi_hba, phy_no);
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
    void *bitmap = hisi_hba->slot_index_tags;

    clear_bit(slot_idx, bitmap);

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
    hisi_sas_slot_index_clear(hisi_hba, slot_idx);

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
    void *bitmap = hisi_hba->slot_index_tags;

    set_bit(slot_idx, bitmap);

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
    void *bitmap = hisi_hba->slot_index_tags;

    index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
    if (index >= hisi_hba->slot_index_count)
        return -SAS_QUEUE_FULL;
    hisi_sas_slot_index_set(hisi_hba, index);

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
    for (i = 0; i < hisi_hba->slot_index_count; ++i)
        hisi_sas_slot_index_clear(hisi_hba, i);

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                 struct hisi_sas_slot *slot)
    struct device *dev = hisi_hba->dev;
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;

    if (!task->lldd_task)

    task->lldd_task = NULL;

    if (!sas_protocol_ata(task->task_proto))
        dma_unmap_sg(dev, task->scatter,

    atomic64_dec(&sas_dev->running_req);

    dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

    list_del_init(&slot->entry);

    hisi_sas_slot_index_free(hisi_hba, slot->idx);

    /* slot memory is fully zeroed when it is reused */
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
                  struct hisi_sas_slot *slot)
    return hisi_hba->hw->prep_smp(hisi_hba, slot);

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
                  struct hisi_sas_slot *slot, int is_tmf,
                  struct hisi_sas_tmf_task *tmf)
    return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
                  struct hisi_sas_slot *slot)
    return hisi_hba->hw->prep_stp(hisi_hba, slot);

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
                    struct hisi_sas_slot *slot,
                    int device_id, int abort_flag, int tag_to_abort)
    return hisi_hba->hw->prep_abort(hisi_hba, slot,
                    device_id, abort_flag, tag_to_abort);

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
    struct hisi_sas_slot *abort_slot =
            container_of(work, struct hisi_sas_slot, abort_slot);
    struct sas_task *task = abort_slot->task;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
    struct scsi_cmnd *cmnd = task->uldd_task;
    struct hisi_sas_tmf_task tmf_task;
    struct device *dev = hisi_hba->dev;
    int tag = abort_slot->idx;

    if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
        dev_err(dev, "cannot abort slot for non-ssp task\n");

    int_to_scsilun(cmnd->device->lun, &lun);
    tmf_task.tmf = TMF_ABORT_TASK;
    tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

    hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);

    /* Do cleanup for this task */
    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    task->task_done(task);
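
/*
 * Allocate a slot index and a delivery-queue entry for the task, DMA-map its
 * scatterlist for non-ATA commands and hand the slot to the protocol-specific
 * prep hook. Called under the delivery-queue lock (see hisi_sas_task_exec()).
 */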
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
                  *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
    struct hisi_hba *hisi_hba = dq->hisi_hba;
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_sas_port *port;
    struct hisi_sas_slot *slot;
    struct hisi_sas_cmd_hdr *cmd_hdr_base;
    struct asd_sas_port *sas_port = device->port;
    struct device *dev = hisi_hba->dev;
    int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;

        struct task_status_struct *ts = &task->task_status;

        ts->resp = SAS_TASK_UNDELIVERED;
        ts->stat = SAS_PHY_DOWN;
        /*
         * libsas will use dev->port, should
         * not call task_done for sata
         */
        if (device->dev_type != SAS_SATA_DEV)
            task->task_done(task);

    if (DEV_IS_GONE(sas_dev)) {
            dev_info(dev, "task prep: device %d not ready\n",
            dev_info(dev, "task prep: device %016llx not ready\n",
                 SAS_ADDR(device->sas_addr));

    port = to_hisi_sas_port(sas_port);
    if (port && !port->port_attached) {
        dev_info(dev, "task prep: %s port%d not attach device\n",
             (dev_is_sata(device)) ?

    if (!sas_protocol_ata(task->task_proto)) {
        if (task->num_scatter) {
            n_elem = dma_map_sg(dev, task->scatter,
                        task->num_scatter, task->data_dir);
        n_elem = task->num_scatter;

    spin_lock_irqsave(&hisi_hba->lock, flags);
    if (hisi_hba->hw->slot_index_alloc)
        rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
        rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);

    dlvry_queue = dq->id;
    dlvry_queue_slot = dq->wr_point;
    slot = &hisi_hba->slot_info[slot_idx];
    memset(slot, 0, sizeof(struct hisi_sas_slot));

    slot->idx = slot_idx;
    slot->n_elem = n_elem;
    slot->dlvry_queue = dlvry_queue;
    slot->dlvry_queue_slot = dlvry_queue_slot;
    cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
    slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

    task->lldd_task = slot;
    INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

    slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
                   GFP_ATOMIC, &slot->buf_dma);
        goto err_out_slot_buf;

    memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
    memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
    memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

    switch (task->task_proto) {
    case SAS_PROTOCOL_SMP:
        rc = hisi_sas_task_prep_smp(hisi_hba, slot);
    case SAS_PROTOCOL_SSP:
        rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
    case SAS_PROTOCOL_SATA:
    case SAS_PROTOCOL_STP:
    case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
        rc = hisi_sas_task_prep_ata(hisi_hba, slot);
        dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",

        dev_err(dev, "task prep: rc = 0x%x\n", rc);

    spin_lock_irqsave(&hisi_hba->lock, flags);
    list_add_tail(&slot->entry, &sas_dev->list);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
    spin_lock_irqsave(&task->task_state_lock, flags);
    task->task_state_flags |= SAS_TASK_AT_INITIATOR;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    dq->slot_prep = slot;

    atomic64_inc(&sas_dev->running_req);

    dma_pool_free(hisi_hba->buffer_pool, slot->buf,
    /* Nothing to be done */
    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_slot_index_free(hisi_hba, slot_idx);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    dev_err(dev, "task prep: failed[%d]!\n", rc);
    if (!sas_protocol_ata(task->task_proto))
        dma_unmap_sg(dev, task->scatter,

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
                  int is_tmf, struct hisi_sas_tmf_task *tmf)
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
    struct device *dev = hisi_hba->dev;
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_sas_dq *dq = sas_dev->dq;

    if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))

    /* protect task_prep and start_delivery sequence */
    spin_lock_irqsave(&dq->lock, flags);
    rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
        dev_err(dev, "task exec: failed[%d]!\n", rc);

        hisi_hba->hw->start_delivery(dq);
    spin_unlock_irqrestore(&dq->lock, flags);

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
    struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    struct sas_ha_struct *sas_ha;

    if (!phy->phy_attached)

    sas_ha = &hisi_hba->sha;
    sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

        struct sas_phy *sphy = sas_phy->phy;

        sphy->negotiated_linkrate = sas_phy->linkrate;
        sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
        sphy->maximum_linkrate_hw =
            hisi_hba->hw->phy_get_max_linkrate();
        if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
            sphy->minimum_linkrate = phy->minimum_linkrate;

        if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
            sphy->maximum_linkrate = phy->maximum_linkrate;

    if (phy->phy_type & PORT_TYPE_SAS) {
        struct sas_identify_frame *id;

        id = (struct sas_identify_frame *)phy->frame_rcvd;
        id->dev_type = phy->identify.device_type;
        id->initiator_bits = SAS_PROTOCOL_ALL;
        id->target_bits = phy->identify.target_port_protocols;
    } else if (phy->phy_type & PORT_TYPE_SATA) {

    sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
    sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct hisi_sas_device *sas_dev = NULL;

    spin_lock_irqsave(&hisi_hba->lock, flags);
    for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
        if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
            int queue = i % hisi_hba->queue_count;
            struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

            hisi_hba->devices[i].device_id = i;
            sas_dev = &hisi_hba->devices[i];
            sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
            sas_dev->dev_type = device->dev_type;
            sas_dev->hisi_hba = hisi_hba;
            sas_dev->sas_device = device;
            INIT_LIST_HEAD(&hisi_hba->devices[i].list);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

static int hisi_sas_dev_found(struct domain_device *device)
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct domain_device *parent_dev = device->parent;
    struct hisi_sas_device *sas_dev;
    struct device *dev = hisi_hba->dev;

    if (hisi_hba->hw->alloc_dev)
        sas_dev = hisi_hba->hw->alloc_dev(device);
        sas_dev = hisi_sas_alloc_dev(device);
        dev_err(dev, "fail alloc dev: max support %d devices\n",
            HISI_SAS_MAX_DEVICES);

    device->lldd_dev = sas_dev;
    hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

    if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
        u8 phy_num = parent_dev->ex_dev.num_phys;

        for (phy_no = 0; phy_no < phy_num; phy_no++) {
            phy = &parent_dev->ex_dev.ex_phy[phy_no];
            if (SAS_ADDR(phy->attached_sas_addr) ==
                SAS_ADDR(device->sas_addr)) {
                sas_dev->attached_phy = phy_no;

        if (phy_no == phy_num) {
            dev_info(dev, "dev found: no attached "
                 "dev:%016llx at ex:%016llx\n",
                 SAS_ADDR(device->sas_addr),
                 SAS_ADDR(parent_dev->sas_addr));

    dev_info(dev, "dev[%d:%x] found\n",
         sas_dev->device_id, sas_dev->dev_type);

static int hisi_sas_slave_configure(struct scsi_device *sdev)
    struct domain_device *dev = sdev_to_domain_dev(sdev);
    int ret = sas_slave_configure(sdev);

    if (!dev_is_sata(dev))
        sas_change_queue_depth(sdev, 64);

static void hisi_sas_scan_start(struct Scsi_Host *shost)
    struct hisi_hba *hisi_hba = shost_priv(shost);

    hisi_hba->hw->phys_init(hisi_hba);

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
    struct hisi_hba *hisi_hba = shost_priv(shost);
    struct sas_ha_struct *sha = &hisi_hba->sha;

    /* Wait for PHY up interrupt to occur */

static void hisi_sas_phyup_work(struct work_struct *work)
    struct hisi_sas_phy *phy =
        container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
    struct hisi_hba *hisi_hba = phy->hisi_hba;
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    int phy_no = sas_phy->id;

    hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
    hisi_sas_bytes_dmaed(hisi_hba, phy_no);

static void hisi_sas_linkreset_work(struct work_struct *work)
    struct hisi_sas_phy *phy =
        container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
    struct asd_sas_phy *sas_phy = &phy->sas_phy;

    hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
    [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
    [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
                   enum hisi_sas_phy_event event)
    struct hisi_hba *hisi_hba = phy->hisi_hba;

    if (WARN_ON(event >= HISI_PHYES_NUM))

    return queue_work(hisi_hba->wq, &phy->works[event]);
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
    struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
    struct asd_sas_phy *sas_phy = &phy->sas_phy;

    phy->hisi_hba = hisi_hba;
    sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
    sas_phy->class = SAS;
    sas_phy->iproto = SAS_PROTOCOL_ALL;
    sas_phy->type = PHY_TYPE_PHYSICAL;
    sas_phy->role = PHY_ROLE_INITIATOR;
    sas_phy->oob_mode = OOB_NOT_CONNECTED;
    sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
    sas_phy->id = phy_no;
    sas_phy->sas_addr = &hisi_hba->sas_addr[0];
    sas_phy->frame_rcvd = &phy->frame_rcvd[0];
    sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
    sas_phy->lldd_phy = phy;

    for (i = 0; i < HISI_PHYES_NUM; i++)
        INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
    struct sas_ha_struct *sas_ha = sas_phy->ha;
    struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
    struct hisi_sas_phy *phy = sas_phy->lldd_phy;
    struct asd_sas_port *sas_port = sas_phy->port;
    struct hisi_sas_port *port = to_hisi_sas_port(sas_port);

    spin_lock_irqsave(&hisi_hba->lock, flags);
    port->port_attached = 1;
    port->id = phy->port_id;
    sas_port->lldd_port = port;
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
                     struct hisi_sas_slot *slot)
        struct task_status_struct *ts;

        ts = &task->task_status;

        ts->resp = SAS_TASK_COMPLETE;
        ts->stat = SAS_ABORTED_TASK;
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags &=
            ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

    hisi_sas_slot_task_free(hisi_hba, task, slot);

/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
                  struct domain_device *device)
    struct hisi_sas_slot *slot, *slot2;
    struct hisi_sas_device *sas_dev = device->lldd_dev;

    list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
        hisi_sas_do_release_task(hisi_hba, slot->task, slot);

static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
    struct hisi_sas_device *sas_dev;
    struct domain_device *device;

    for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
        sas_dev = &hisi_hba->devices[i];
        device = sas_dev->sas_device;

        if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||

        hisi_sas_release_task(hisi_hba, device);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
                  struct domain_device *device)
    if (hisi_hba->hw->dereg_device)
        hisi_hba->hw->dereg_device(hisi_hba, device);

static void hisi_sas_dev_gone(struct domain_device *device)
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = hisi_hba->dev;

    dev_info(dev, "dev[%d:%x] is gone\n",
         sas_dev->device_id, sas_dev->dev_type);

    if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
        hisi_sas_internal_task_abort(hisi_hba, device,
                         HISI_SAS_INT_ABT_DEV, 0);

        hisi_sas_dereg_device(hisi_hba, device);

        hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
        device->lldd_dev = NULL;
        memset(sas_dev, 0, sizeof(*sas_dev));

    if (hisi_hba->hw->free_device)
        hisi_hba->hw->free_device(sas_dev);
    sas_dev->dev_type = SAS_PHY_UNUSED;

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
    return hisi_sas_task_exec(task, gfp_flags, 0, NULL);

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                void *funcdata)
    struct sas_ha_struct *sas_ha = sas_phy->ha;
    struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
    int phy_no = sas_phy->id;

    case PHY_FUNC_HARD_RESET:
        hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);

    case PHY_FUNC_LINK_RESET:
        hisi_hba->hw->phy_disable(hisi_hba, phy_no);
        hisi_hba->hw->phy_start(hisi_hba, phy_no);

    case PHY_FUNC_DISABLE:
        hisi_hba->hw->phy_disable(hisi_hba, phy_no);

    case PHY_FUNC_SET_LINK_RATE:
        hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);

    case PHY_FUNC_GET_EVENTS:
        if (hisi_hba->hw->get_events) {
            hisi_hba->hw->get_events(hisi_hba, phy_no);

    case PHY_FUNC_RELEASE_SPINUP_HOLD:

static void hisi_sas_task_done(struct sas_task *task)
    if (!del_timer(&task->slow_task->timer))
    complete(&task->slow_task->completion);

static void hisi_sas_tmf_timedout(struct timer_list *t)
    struct sas_task_slow *slow = from_timer(slow, t, timer);
    struct sas_task *task = slow->task;

    spin_lock_irqsave(&task->task_state_lock, flags);
    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    complete(&task->slow_task->completion);

#define TASK_TIMEOUT 20
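
/*
 * Issue a TMF as an internal slow task: build the task, arm a TASK_TIMEOUT
 * second timer, execute it and wait for completion, retrying up to
 * TASK_RETRY times before giving up.
 */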
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                       void *parameter, u32 para_len,
                       struct hisi_sas_tmf_task *tmf)
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
    struct device *dev = hisi_hba->dev;
    struct sas_task *task;

    for (retry = 0; retry < TASK_RETRY; retry++) {
        task = sas_alloc_slow_task(GFP_KERNEL);

        task->task_proto = device->tproto;

        if (dev_is_sata(device)) {
            task->ata_task.device_control_reg_update = 1;
            memcpy(&task->ata_task.fis, parameter, para_len);
            memcpy(&task->ssp_task, parameter, para_len);

        task->task_done = hisi_sas_task_done;

        task->slow_task->timer.function = hisi_sas_tmf_timedout;
        task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
        add_timer(&task->slow_task->timer);

        res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

            del_timer(&task->slow_task->timer);
            dev_err(dev, "abort tmf: executing internal task failed: %d\n",

        wait_for_completion(&task->slow_task->completion);
        res = TMF_RESP_FUNC_FAILED;
        /* Even if the TMF timed out, return directly. */
        if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
            if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                struct hisi_sas_slot *slot = task->lldd_task;

                dev_err(dev, "abort tmf: TMF task timeout and not done\n");

            dev_err(dev, "abort tmf: TMF task timeout\n");

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
            res = TMF_RESP_FUNC_COMPLETE;

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_SUCC) {
            res = TMF_RESP_FUNC_SUCC;

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == SAS_DATA_UNDERRUN) {
            /* no error, but return the number of bytes of underrun */
            dev_warn(dev, "abort tmf: task to dev %016llx "
                 "resp: 0x%x sts 0x%x underrun\n",
                 SAS_ADDR(device->sas_addr),
                 task->task_status.resp,
                 task->task_status.stat);
            res = task->task_status.residual;

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == SAS_DATA_OVERRUN) {
            dev_warn(dev, "abort tmf: blocked task error\n");

        dev_warn(dev, "abort tmf: task to dev "
             "%016llx resp: 0x%x status 0x%x\n",
             SAS_ADDR(device->sas_addr), task->task_status.resp,
             task->task_status.stat);

    if (retry == TASK_RETRY)
        dev_warn(dev, "abort tmf: executing internal task failed!\n");

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
                    bool reset, int pmp, u8 *fis)
    struct ata_taskfile tf;

    ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_DEV_RESET;
    ata_tf_to_fis(&tf, pmp, 0, fis);

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
    struct ata_port *ap = device->sata_dev.ap;
    struct ata_link *link;
    int rc = TMF_RESP_FUNC_FAILED;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = hisi_hba->dev;
    int s = sizeof(struct host_to_dev_fis);

    ata_for_each_link(link, ap, EDGE) {
        int pmp = sata_srst_pmp(link);

        hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
        rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
        if (rc != TMF_RESP_FUNC_COMPLETE)

    if (rc == TMF_RESP_FUNC_COMPLETE) {
        ata_for_each_link(link, ap, EDGE) {
            int pmp = sata_srst_pmp(link);

            hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
            rc = hisi_sas_exec_internal_tmf_task(device, fis,
            if (rc != TMF_RESP_FUNC_COMPLETE)
                dev_err(dev, "ata disk de-reset failed\n");
        dev_err(dev, "ata disk reset failed\n");

    if (rc == TMF_RESP_FUNC_COMPLETE) {
        spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_release_task(hisi_hba, device);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
                    u8 *lun, struct hisi_sas_tmf_task *tmf)
    struct sas_ssp_task ssp_task;

    if (!(device->tproto & SAS_PROTOCOL_SSP))
        return TMF_RESP_FUNC_ESUPP;

    memcpy(ssp_task.LUN, lun, 8);

    return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
                           sizeof(ssp_task), tmf);
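
/*
 * After a controller reset, refresh each registered device's port id from the
 * PHY that came back up and rewrite its ITCT entry.
 */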
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
    u32 state = hisi_hba->hw->get_phys_state(hisi_hba);

    for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
        struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
        struct domain_device *device = sas_dev->sas_device;
        struct asd_sas_port *sas_port;
        struct hisi_sas_port *port;
        struct hisi_sas_phy *phy = NULL;
        struct asd_sas_phy *sas_phy;

        if ((sas_dev->dev_type == SAS_PHY_UNUSED)
            || !device || !device->port)

        sas_port = device->port;
        port = to_hisi_sas_port(sas_port);

        list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
            if (state & BIT(sas_phy->id)) {
                phy = sas_phy->lldd_phy;

            port->id = phy->port_id;

            /* Update linkrate of directly attached device. */
            if (!device->parent)
                device->linkrate = phy->sas_phy.linkrate;

            hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
    struct sas_ha_struct *sas_ha = &hisi_hba->sha;
    struct asd_sas_port *_sas_port = NULL;

    for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct asd_sas_port *sas_port = sas_phy->port;
        bool do_port_check = !!(_sas_port != sas_port);

        if (!sas_phy->phy->enabled)

        /* Report PHY state change to libsas */
        if (state & BIT(phy_no)) {
            if (do_port_check && sas_port && sas_port->port_dev) {
                struct domain_device *dev = sas_port->port_dev;

                _sas_port = sas_port;

                if (DEV_IS_EXPANDER(dev->dev_type))
                    sas_ha->notify_port_event(sas_phy,
                            PORTE_BROADCAST_RCVD);
        } else if (old_state & (1 << phy_no))
            /* PHY down but was up before */
            hisi_sas_phy_down(hisi_hba, phy_no, 0);
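
/*
 * Soft-reset the whole controller: reject new commands, run the HW soft_reset
 * hook, release outstanding tasks, re-init the PHYs and then rescan the
 * topology against the pre-reset PHY state.
 */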
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
    struct device *dev = hisi_hba->dev;
    struct Scsi_Host *shost = hisi_hba->shost;
    u32 old_state, state;
    unsigned long flags;

    if (!hisi_hba->hw->soft_reset)

    if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))

    dev_info(dev, "controller resetting...\n");
    old_state = hisi_hba->hw->get_phys_state(hisi_hba);

    scsi_block_requests(shost);
    set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
    rc = hisi_hba->hw->soft_reset(hisi_hba);
        dev_warn(dev, "controller reset failed (%d)\n", rc);
        clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
        scsi_unblock_requests(shost);

    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_release_tasks(hisi_hba);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

    /* Init and wait for PHYs to come up and all libsas event finished. */
    hisi_hba->hw->phys_init(hisi_hba);

    hisi_sas_refresh_port_id(hisi_hba);
    scsi_unblock_requests(shost);

    state = hisi_hba->hw->get_phys_state(hisi_hba);
    hisi_sas_rescan_topology(hisi_hba, old_state, state);
    dev_info(dev, "controller reset complete\n");

    clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
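
/*
 * libsas abort handler: an ABORT TASK TMF plus a command-level internal abort
 * for SSP, a device-level internal abort and ATA soft reset for SATA/STP, and
 * a command-level internal abort for SMP.
 */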
static int hisi_sas_abort_task(struct sas_task *task)
    struct scsi_lun lun;
    struct hisi_sas_tmf_task tmf_task;
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
    struct device *dev = hisi_hba->dev;
    int rc = TMF_RESP_FUNC_FAILED;
    unsigned long flags;

        dev_warn(dev, "Device has been removed\n");
        return TMF_RESP_FUNC_FAILED;

    if (task->task_state_flags & SAS_TASK_STATE_DONE) {
        rc = TMF_RESP_FUNC_COMPLETE;

    sas_dev->dev_status = HISI_SAS_DEV_EH;
    if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
        struct scsi_cmnd *cmnd = task->uldd_task;
        struct hisi_sas_slot *slot = task->lldd_task;
        u32 tag = slot->idx;

        int_to_scsilun(cmnd->device->lun, &lun);
        tmf_task.tmf = TMF_ABORT_TASK;
        tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

        rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,

        rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
                           HISI_SAS_INT_ABT_CMD, tag);
            dev_err(dev, "abort task: internal abort (%d)\n", rc2);
            return TMF_RESP_FUNC_FAILED;

        /*
         * If the TMF finds that the IO is not in the device and also
         * the internal abort does not succeed, then it is safe to
         * free the slot.
         * Note: if the internal abort succeeds then the slot
         * will have already been completed
         */
        if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
            if (task->lldd_task) {
                spin_lock_irqsave(&hisi_hba->lock, flags);
                hisi_sas_do_release_task(hisi_hba, task, slot);
                spin_unlock_irqrestore(&hisi_hba->lock, flags);

    } else if (task->task_proto & SAS_PROTOCOL_SATA ||
           task->task_proto & SAS_PROTOCOL_STP) {
        if (task->dev->dev_type == SAS_SATA_DEV) {
            rc = hisi_sas_internal_task_abort(hisi_hba, device,
                              HISI_SAS_INT_ABT_DEV, 0);
                dev_err(dev, "abort task: internal abort failed\n");

            hisi_sas_dereg_device(hisi_hba, device);
            rc = hisi_sas_softreset_ata_disk(device);

    } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
        struct hisi_sas_slot *slot = task->lldd_task;
        u32 tag = slot->idx;

        rc = hisi_sas_internal_task_abort(hisi_hba, device,
                          HISI_SAS_INT_ABT_CMD, tag);
        if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
            spin_lock_irqsave(&hisi_hba->lock, flags);
            hisi_sas_do_release_task(hisi_hba, task, slot);
            spin_unlock_irqrestore(&hisi_hba->lock, flags);

    if (rc != TMF_RESP_FUNC_COMPLETE)
        dev_notice(dev, "abort task: rc=%d\n", rc);

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
    struct hisi_sas_tmf_task tmf_task;
    int rc = TMF_RESP_FUNC_FAILED;

    tmf_task.tmf = TMF_ABORT_TASK_SET;
    rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
    int rc = TMF_RESP_FUNC_FAILED;
    struct hisi_sas_tmf_task tmf_task;

    tmf_task.tmf = TMF_CLEAR_ACA;
    rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
    struct sas_phy *phy = sas_get_local_phy(device);
    int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
            (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
    rc = sas_phy_reset(phy, reset_type);
    sas_put_local_phy(phy);

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = hisi_hba->dev;
    int rc = TMF_RESP_FUNC_FAILED;
    unsigned long flags;

    if (sas_dev->dev_status != HISI_SAS_DEV_EH)
        return TMF_RESP_FUNC_FAILED;
    sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

    rc = hisi_sas_internal_task_abort(hisi_hba, device,
                      HISI_SAS_INT_ABT_DEV, 0);
        dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
        return TMF_RESP_FUNC_FAILED;

    hisi_sas_dereg_device(hisi_hba, device);

    rc = hisi_sas_debug_I_T_nexus_reset(device);

    if (rc == TMF_RESP_FUNC_COMPLETE) {
        spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_release_task(hisi_hba, device);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = hisi_hba->dev;
    unsigned long flags;
    int rc = TMF_RESP_FUNC_FAILED;

    sas_dev->dev_status = HISI_SAS_DEV_EH;
    if (dev_is_sata(device)) {
        struct sas_phy *phy;

        /* Clear internal IO and then hardreset */
        rc = hisi_sas_internal_task_abort(hisi_hba, device,
                          HISI_SAS_INT_ABT_DEV, 0);
            dev_err(dev, "lu_reset: internal abort failed\n");

        hisi_sas_dereg_device(hisi_hba, device);

        phy = sas_get_local_phy(device);

        rc = sas_phy_reset(phy, 1);

            spin_lock_irqsave(&hisi_hba->lock, flags);
            hisi_sas_release_task(hisi_hba, device);
            spin_unlock_irqrestore(&hisi_hba->lock, flags);
        sas_put_local_phy(phy);
        struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

        rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
        if (rc == TMF_RESP_FUNC_COMPLETE) {
            spin_lock_irqsave(&hisi_hba->lock, flags);
            hisi_sas_release_task(hisi_hba, device);
            spin_unlock_irqrestore(&hisi_hba->lock, flags);

    if (rc != TMF_RESP_FUNC_COMPLETE)
        dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
            sas_dev->device_id, rc);

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
    struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
    HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

    queue_work(hisi_hba->wq, &r.work);
    wait_for_completion(r.completion);
        return TMF_RESP_FUNC_COMPLETE;

    return TMF_RESP_FUNC_FAILED;

static int hisi_sas_query_task(struct sas_task *task)
    struct scsi_lun lun;
    struct hisi_sas_tmf_task tmf_task;
    int rc = TMF_RESP_FUNC_FAILED;

    if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
        struct scsi_cmnd *cmnd = task->uldd_task;
        struct domain_device *device = task->dev;
        struct hisi_sas_slot *slot = task->lldd_task;
        u32 tag = slot->idx;

        int_to_scsilun(cmnd->device->lun, &lun);
        tmf_task.tmf = TMF_QUERY_TASK;
        tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

        rc = hisi_sas_debug_issue_ssp_tmf(device,

        /* The task is still in Lun, release it then */
        case TMF_RESP_FUNC_SUCC:
        /* The task is not in Lun or failed, reset the phy */
        case TMF_RESP_FUNC_FAILED:
        case TMF_RESP_FUNC_COMPLETE:
            rc = TMF_RESP_FUNC_FAILED;
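
/*
 * Build and deliver an internal abort command (for a whole device or a single
 * tag) on the device's delivery queue, mirroring the normal task_prep path.
 */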
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
                  struct sas_task *task, int abort_flag,
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct device *dev = hisi_hba->dev;
    struct hisi_sas_port *port;
    struct hisi_sas_slot *slot;
    struct asd_sas_port *sas_port = device->port;
    struct hisi_sas_cmd_hdr *cmd_hdr_base;
    struct hisi_sas_dq *dq = sas_dev->dq;
    int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
    unsigned long flags, flags_dq;

    if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))

    port = to_hisi_sas_port(sas_port);

    /* simply get a slot and send abort command */
    spin_lock_irqsave(&hisi_hba->lock, flags);
    rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    spin_lock_irqsave(&dq->lock, flags_dq);
    rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);

    dlvry_queue = dq->id;
    dlvry_queue_slot = dq->wr_point;

    slot = &hisi_hba->slot_info[slot_idx];
    memset(slot, 0, sizeof(struct hisi_sas_slot));

    slot->idx = slot_idx;
    slot->n_elem = n_elem;
    slot->dlvry_queue = dlvry_queue;
    slot->dlvry_queue_slot = dlvry_queue_slot;
    cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
    slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

    task->lldd_task = slot;

    slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
                   GFP_ATOMIC, &slot->buf_dma);

    memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
    memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
    memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

    rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
                      abort_flag, task_tag);

    spin_lock_irqsave(&hisi_hba->lock, flags);
    list_add_tail(&slot->entry, &sas_dev->list);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
    spin_lock_irqsave(&task->task_state_lock, flags);
    task->task_state_flags |= SAS_TASK_AT_INITIATOR;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    dq->slot_prep = slot;

    atomic64_inc(&sas_dev->running_req);

    /* send abort command to the chip */
    hisi_hba->hw->start_delivery(dq);
    spin_unlock_irqrestore(&dq->lock, flags_dq);

    dma_pool_free(hisi_hba->buffer_pool, slot->buf,

    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_slot_index_free(hisi_hba, slot_idx);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
    spin_unlock_irqrestore(&dq->lock, flags_dq);

    dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single IO command)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                 struct domain_device *device,
                 int abort_flag, int tag)
    struct sas_task *task;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct device *dev = hisi_hba->dev;

    /*
     * If the hardware does not implement prep_abort, it either does not
     * support internal abort or does not need one. In that case return
     * TMF_RESP_FUNC_FAILED and let the remaining steps proceed as if the
     * internal abort had been executed and completed on the CQ.
     */
    if (!hisi_hba->hw->prep_abort)
        return TMF_RESP_FUNC_FAILED;

    task = sas_alloc_slow_task(GFP_KERNEL);

    task->task_proto = device->tproto;
    task->task_done = hisi_sas_task_done;
    task->slow_task->timer.function = hisi_sas_tmf_timedout;
    task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
    add_timer(&task->slow_task->timer);

    res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
                        task, abort_flag, tag);
        del_timer(&task->slow_task->timer);
        dev_err(dev, "internal task abort: executing internal task failed: %d\n",

    wait_for_completion(&task->slow_task->completion);
    res = TMF_RESP_FUNC_FAILED;

    /* Internal abort timed out */
    if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
        if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
            struct hisi_sas_slot *slot = task->lldd_task;

            dev_err(dev, "internal task abort: timeout and not done.\n");

        dev_err(dev, "internal task abort: timeout.\n");

    if (task->task_status.resp == SAS_TASK_COMPLETE &&
        task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
        res = TMF_RESP_FUNC_COMPLETE;

    if (task->task_status.resp == SAS_TASK_COMPLETE &&
        task->task_status.stat == TMF_RESP_FUNC_SUCC) {
        res = TMF_RESP_FUNC_SUCC;

    dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
        "resp: 0x%x sts 0x%x\n",
        SAS_ADDR(device->sas_addr),
        task->task_status.resp, /* 0 is complete, -1 is undelivered */
        task->task_status.stat);
    sas_free_task(task);

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
    hisi_sas_port_notify_formed(sas_phy);

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
                u8 reg_index, u8 reg_count, u8 *write_data)
    struct hisi_hba *hisi_hba = sha->lldd_ha;

    if (!hisi_hba->hw->write_gpio)

    return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
                    reg_index, reg_count, write_data);

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
    phy->phy_attached = 0;

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
    struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    struct sas_ha_struct *sas_ha = &hisi_hba->sha;

        /* Phy down but ready */
        hisi_sas_bytes_dmaed(hisi_hba, phy_no);
        hisi_sas_port_notify_formed(sas_phy);
        struct hisi_sas_port *port = phy->port;

        /* Phy down and not ready */
        sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
        sas_phy_disconnected(sas_phy);

            if (phy->phy_type & PORT_TYPE_SAS) {
                int port_id = port->id;

                if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
                    port->port_attached = 0;
            } else if (phy->phy_type & PORT_TYPE_SATA)
                port->port_attached = 0;

        hisi_sas_phy_disconnected(phy);
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
    for (i = 0; i < hisi_hba->queue_count; i++) {
        struct hisi_sas_cq *cq = &hisi_hba->cq[i];

        tasklet_kill(&cq->tasklet);
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

struct device_attribute *host_attrs[] = {
    &dev_attr_phy_event_threshold,

static struct scsi_host_template _hisi_sas_sht = {
    .module = THIS_MODULE,
    .queuecommand = sas_queuecommand,
    .target_alloc = sas_target_alloc,
    .slave_configure = hisi_sas_slave_configure,
    .scan_finished = hisi_sas_scan_finished,
    .scan_start = hisi_sas_scan_start,
    .change_queue_depth = sas_change_queue_depth,
    .bios_param = sas_bios_param,
    .sg_tablesize = SG_ALL,
    .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
    .use_clustering = ENABLE_CLUSTERING,
    .eh_device_reset_handler = sas_eh_device_reset_handler,
    .eh_target_reset_handler = sas_eh_target_reset_handler,
    .target_destroy = sas_target_destroy,
    .shost_attrs = host_attrs,
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);

static struct sas_domain_function_template hisi_sas_transport_ops = {
    .lldd_dev_found = hisi_sas_dev_found,
    .lldd_dev_gone = hisi_sas_dev_gone,
    .lldd_execute_task = hisi_sas_queue_command,
    .lldd_control_phy = hisi_sas_control_phy,
    .lldd_abort_task = hisi_sas_abort_task,
    .lldd_abort_task_set = hisi_sas_abort_task_set,
    .lldd_clear_aca = hisi_sas_clear_aca,
    .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
    .lldd_lu_reset = hisi_sas_lu_reset,
    .lldd_query_task = hisi_sas_query_task,
    .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
    .lldd_port_formed = hisi_sas_port_formed,
    .lldd_write_gpio = hisi_sas_write_gpio,

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
    int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

    for (i = 0; i < hisi_hba->queue_count; i++) {
        struct hisi_sas_cq *cq = &hisi_hba->cq[i];
        struct hisi_sas_dq *dq = &hisi_hba->dq[i];

        s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
        memset(hisi_hba->cmd_hdr[i], 0, s);

        s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
        memset(hisi_hba->complete_hdr[i], 0, s);

    s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
    memset(hisi_hba->initial_fis, 0, s);

    s = max_command_entries * sizeof(struct hisi_sas_iost);
    memset(hisi_hba->iost, 0, s);

    s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
    memset(hisi_hba->breakpoint, 0, s);

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
    memset(hisi_hba->sata_breakpoint, 0, s);
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
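
/*
 * Allocate the per-HBA DMA memories (command/completion queues, ITCT, IOST,
 * breakpoint tables, initial FIS area) plus the slot and tag bookkeeping.
 */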
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
    struct device *dev = hisi_hba->dev;
    int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

    spin_lock_init(&hisi_hba->lock);
    for (i = 0; i < hisi_hba->n_phy; i++) {
        hisi_sas_phy_init(hisi_hba, i);
        hisi_hba->port[i].port_attached = 0;
        hisi_hba->port[i].id = -1;

    for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
        hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
        hisi_hba->devices[i].device_id = i;
        hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;

    for (i = 0; i < hisi_hba->queue_count; i++) {
        struct hisi_sas_cq *cq = &hisi_hba->cq[i];
        struct hisi_sas_dq *dq = &hisi_hba->dq[i];

        /* Completion queue structure */
        cq->hisi_hba = hisi_hba;

        /* Delivery queue structure */
        dq->hisi_hba = hisi_hba;

        /* Delivery queue */
        s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
        hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
                    &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
        if (!hisi_hba->cmd_hdr[i])

        /* Completion queue */
        s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
        hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
                &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
        if (!hisi_hba->complete_hdr[i])

    s = sizeof(struct hisi_sas_slot_buf_table);
    hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
    if (!hisi_hba->buffer_pool)

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
    hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
    if (!hisi_hba->itct)

    memset(hisi_hba->itct, 0, s);

    hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
                       sizeof(struct hisi_sas_slot),
    if (!hisi_hba->slot_info)

    s = max_command_entries * sizeof(struct hisi_sas_iost);
    hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
    if (!hisi_hba->iost)

    s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
    hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
                &hisi_hba->breakpoint_dma, GFP_KERNEL);
    if (!hisi_hba->breakpoint)

    hisi_hba->slot_index_count = max_command_entries;
    s = hisi_hba->slot_index_count / BITS_PER_BYTE;
    hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
    if (!hisi_hba->slot_index_tags)

    s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
    hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
                &hisi_hba->initial_fis_dma, GFP_KERNEL);
    if (!hisi_hba->initial_fis)

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
    hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
                &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
    if (!hisi_hba->sata_breakpoint)

    hisi_sas_init_mem(hisi_hba);

    hisi_sas_slot_index_init(hisi_hba);

    hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
    if (!hisi_hba->wq) {
        dev_err(dev, "sas_alloc: failed to create workqueue\n");
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
    struct device *dev = hisi_hba->dev;
    int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

    for (i = 0; i < hisi_hba->queue_count; i++) {
        s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
        if (hisi_hba->cmd_hdr[i])
            dma_free_coherent(dev, s,
                      hisi_hba->cmd_hdr[i],
                      hisi_hba->cmd_hdr_dma[i]);

        s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
        if (hisi_hba->complete_hdr[i])
            dma_free_coherent(dev, s,
                      hisi_hba->complete_hdr[i],
                      hisi_hba->complete_hdr_dma[i]);

    dma_pool_destroy(hisi_hba->buffer_pool);

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        dma_free_coherent(dev, s,
                  hisi_hba->itct, hisi_hba->itct_dma);

    s = max_command_entries * sizeof(struct hisi_sas_iost);
        dma_free_coherent(dev, s,
                  hisi_hba->iost, hisi_hba->iost_dma);

    s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
    if (hisi_hba->breakpoint)
        dma_free_coherent(dev, s,
                  hisi_hba->breakpoint,
                  hisi_hba->breakpoint_dma);

    s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
    if (hisi_hba->initial_fis)
        dma_free_coherent(dev, s,
                  hisi_hba->initial_fis,
                  hisi_hba->initial_fis_dma);

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
    if (hisi_hba->sata_breakpoint)
        dma_free_coherent(dev, s,
                  hisi_hba->sata_breakpoint,
                  hisi_hba->sata_breakpoint_dma);

        destroy_workqueue(hisi_hba->wq);
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
    struct hisi_hba *hisi_hba =
        container_of(work, struct hisi_hba, rst_work);

    hisi_sas_controller_reset(hisi_hba);
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
    struct hisi_sas_rst *rst =
        container_of(work, struct hisi_sas_rst, work);

    if (!hisi_sas_controller_reset(rst->hisi_hba))
    complete(rst->completion);
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
    struct device *dev = hisi_hba->dev;
    struct platform_device *pdev = hisi_hba->platform_dev;
    struct device_node *np = pdev ? pdev->dev.of_node : NULL;

    if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
        dev_err(dev, "could not get property sas-addr\n");

        /*
         * These properties are only required for platform device-based
         * controllers with DT firmware.
         */
        hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                    "hisilicon,sas-syscon");
        if (IS_ERR(hisi_hba->ctrl)) {
            dev_err(dev, "could not get syscon\n");

        if (device_property_read_u32(dev, "ctrl-reset-reg",
                         &hisi_hba->ctrl_reset_reg)) {
                "could not get property ctrl-reset-reg\n");

        if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                         &hisi_hba->ctrl_reset_sts_reg)) {
                "could not get property ctrl-reset-sts-reg\n");

        if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                         &hisi_hba->ctrl_clock_ena_reg)) {
                "could not get property ctrl-clock-ena-reg\n");

    refclk = devm_clk_get(dev, NULL);
        dev_dbg(dev, "no ref clk property\n");
        hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

    if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
        dev_err(dev, "could not get property phy-count\n");

    if (device_property_read_u32(dev, "queue-count",
                     &hisi_hba->queue_count)) {
        dev_err(dev, "could not get property queue-count\n");
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                          const struct hisi_sas_hw *hw)
    struct resource *res;
    struct Scsi_Host *shost;
    struct hisi_hba *hisi_hba;
    struct device *dev = &pdev->dev;

    shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
        dev_err(dev, "scsi host alloc failed\n");
    hisi_hba = shost_priv(shost);

    INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);

    hisi_hba->dev = dev;
    hisi_hba->platform_dev = pdev;
    hisi_hba->shost = shost;
    SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

    timer_setup(&hisi_hba->timer, NULL, 0);

    if (hisi_sas_get_fw_info(hisi_hba) < 0)

    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
        dev_err(dev, "No usable DMA addressing method\n");

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    hisi_hba->regs = devm_ioremap_resource(dev, res);
    if (IS_ERR(hisi_hba->regs))

    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(hisi_hba->sgpio_regs))

    if (hisi_sas_alloc(hisi_hba, shost)) {
        hisi_sas_free(hisi_hba);

    scsi_host_put(shost);
    dev_err(dev, "shost alloc failed\n");

void hisi_sas_init_add(struct hisi_hba *hisi_hba)
    for (i = 0; i < hisi_hba->n_phy; i++)
        memcpy(&hisi_hba->phy[i].dev_sas_addr,
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

int hisi_sas_probe(struct platform_device *pdev,
           const struct hisi_sas_hw *hw)
    struct Scsi_Host *shost;
    struct hisi_hba *hisi_hba;
    struct device *dev = &pdev->dev;
    struct asd_sas_phy **arr_phy;
    struct asd_sas_port **arr_port;
    struct sas_ha_struct *sha;
    int rc, phy_nr, port_nr, i;

    shost = hisi_sas_shost_alloc(pdev, hw);

    sha = SHOST_TO_SAS_HA(shost);
    hisi_hba = shost_priv(shost);
    platform_set_drvdata(pdev, sha);

    phy_nr = port_nr = hisi_hba->n_phy;

    arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
    arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
    if (!arr_phy || !arr_port) {

    sha->sas_phy = arr_phy;
    sha->sas_port = arr_port;
    sha->lldd_ha = hisi_hba;

    shost->transportt = hisi_sas_stt;
    shost->max_id = HISI_SAS_MAX_DEVICES;
    shost->max_lun = ~0;
    shost->max_channel = 1;
    shost->max_cmd_len = 16;
    shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
    shost->can_queue = hisi_hba->hw->max_command_entries;
    shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

    sha->sas_ha_name = DRV_NAME;
    sha->dev = hisi_hba->dev;
    sha->lldd_module = THIS_MODULE;
    sha->sas_addr = &hisi_hba->sas_addr[0];
    sha->num_phys = hisi_hba->n_phy;
    sha->core.shost = hisi_hba->shost;

    for (i = 0; i < hisi_hba->n_phy; i++) {
        sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
        sha->sas_port[i] = &hisi_hba->port[i].sas_port;

    hisi_sas_init_add(hisi_hba);

    rc = scsi_add_host(shost, &pdev->dev);

    rc = sas_register_ha(sha);
        goto err_out_register_ha;

    rc = hisi_hba->hw->hw_init(hisi_hba);
        goto err_out_register_ha;

    scsi_scan_host(shost);

err_out_register_ha:
    scsi_remove_host(shost);
    hisi_sas_free(hisi_hba);
    scsi_host_put(shost);
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
    struct sas_ha_struct *sha = platform_get_drvdata(pdev);
    struct hisi_hba *hisi_hba = sha->lldd_ha;
    struct Scsi_Host *shost = sha->core.shost;

    sas_unregister_ha(sha);
    sas_remove_host(sha->core.shost);

    hisi_sas_free(hisi_hba);
    scsi_host_put(shost);
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
    hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);

static __exit void hisi_sas_exit(void)
    sas_release_transport(hisi_sas_stt);

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);