/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);

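/*
 * Classify an ATA command FIS into the SATA protocol class (FPDMA/NCQ,
 * PIO, DMA or non-data) that the hw layers use when filling in STP
 * command headers.
 */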
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

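/*
 * Slot index (IPTT) management: each outstanding command owns one bit in
 * the slot_index_tags bitmap; allocation is a first-zero-bit search and
 * callers hold hisi_hba->lock around it.
 */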
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}

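/*
 * Build one command slot for @task on delivery queue @dq: map the
 * scatterlist, allocate a slot index and delivery queue entry, fill the
 * command header/table for the protocol and bump *pass so the caller
 * knows a delivery is pending. Called with dq->lock held.
 */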
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
		*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
		int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;
	++(*pass);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter,
				     task->num_scatter,
				     task->data_dir);
prep_out:
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_dq *dq = sas_dev->dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&dq->lock, flags);
	rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}

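/*
 * PHY event work items: the phy-up and link-reset handlers run from the
 * driver workqueue because they may sleep (sl_notify, control_phy).
 */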
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

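/*
 * libsas lldd_control_phy callback: dispatch PHY control requests (reset,
 * disable, link rate, event counters) to the hw-specific operations.
 */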
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
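
/*
 * Issue an internal TMF (SSP TMF IU, or a SATA soft-reset FIS when the
 * device is SATA) and wait for its completion, retrying up to TASK_RETRY
 * times; TASK_TIMEOUT and INTERNAL_ABORT_TIMEOUT are in seconds.
 */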
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
		    || !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}

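/*
 * Full controller soft reset: reject new commands, reset the HW, free all
 * outstanding slots, re-init the PHYs and then rescan the topology against
 * the pre-reset PHY state.
 */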
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}

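/*
 * libsas error-handler callback: try a TMF ABORT TASK (SSP), a device-wide
 * internal abort plus softreset (SATA/STP), or an internal abort of the
 * single command (SMP), releasing the slot when it is safe to do so.
 */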
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return TMF_RESP_FUNC_COMPLETE;

	return TMF_RESP_FUNC_FAILED;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

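/*
 * Build and deliver a single internal abort command (no data payload): the
 * slot setup mirrors hisi_sas_task_prep(), but the command header is filled
 * by the hw layer's prep_abort().
 */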
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If the hw does not implement prep_abort, it either does not support
	 * internal abort or does not need one. Return TMF_RESP_FUNC_FAILED
	 * and let the remaining steps proceed, relying on the internal abort
	 * having been executed and completed on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
			reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};

static struct scsi_host_template _hisi_sas_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = sas_queuecommand,
	.target_alloc = sas_target_alloc,
	.slave_configure = hisi_sas_slave_configure,
	.scan_finished = hisi_sas_scan_finished,
	.scan_start = hisi_sas_scan_start,
	.change_queue_depth = sas_change_queue_depth,
	.bios_param = sas_bios_param,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy = sas_target_destroy,
	.ioctl = sas_ioctl,
	.shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);

e8899fad 1726static struct sas_domain_function_template hisi_sas_transport_ops = {
abda97c2
JG
1727 .lldd_dev_found = hisi_sas_dev_found,
1728 .lldd_dev_gone = hisi_sas_dev_gone,
42e7a693 1729 .lldd_execute_task = hisi_sas_queue_command,
e4189d53 1730 .lldd_control_phy = hisi_sas_control_phy,
0efff300
JG
1731 .lldd_abort_task = hisi_sas_abort_task,
1732 .lldd_abort_task_set = hisi_sas_abort_task_set,
1733 .lldd_clear_aca = hisi_sas_clear_aca,
1734 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1735 .lldd_lu_reset = hisi_sas_lu_reset,
1736 .lldd_query_task = hisi_sas_query_task,
8b05ad6a 1737 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
184a4635 1738 .lldd_port_formed = hisi_sas_port_formed,
336bd78b 1739 .lldd_port_deformed = hisi_sas_port_deformed,
6379c560 1740 .lldd_write_gpio = hisi_sas_write_gpio,
e8899fad
JG
1741};
1742
06ec0fb9
XC
1743void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1744{
1745 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1746
1747 for (i = 0; i < hisi_hba->queue_count; i++) {
1748 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1749 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1750
1751 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1752 memset(hisi_hba->cmd_hdr[i], 0, s);
1753 dq->wr_point = 0;
1754
1755 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1756 memset(hisi_hba->complete_hdr[i], 0, s);
1757 cq->rd_point = 0;
1758 }
1759
1760 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1761 memset(hisi_hba->initial_fis, 0, s);
1762
1763 s = max_command_entries * sizeof(struct hisi_sas_iost);
1764 memset(hisi_hba->iost, 0, s);
1765
1766 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1767 memset(hisi_hba->breakpoint, 0, s);
1768
3297ded1 1769 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
06ec0fb9
XC
1770 memset(hisi_hba->sata_breakpoint, 0, s);
1771}
1772EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
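/*
 * Illustrative sketch (not part of this file): besides the allocation path
 * below, a controller reset would typically re-run hisi_sas_init_mem() to
 * drop stale command/completion headers and rewind the queue pointers
 * before handing the hardware back to its init routine.  The wrapper name
 * is hypothetical.
 */
#if 0
static int example_controller_reinit(struct hisi_hba *hisi_hba)
{
	hisi_sas_init_mem(hisi_hba);

	return hisi_hba->hw->hw_init(hisi_hba);
}
#endif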
1773
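/*
 * hisi_sas_alloc() - allocate the per-HBA resources: the delivery and
 * completion queue rings, the slot buffer dma_pool, the ITCT, IOST and
 * breakpoint tables, the slot array and tag bitmap, the initial FIS area
 * and the per-HBA workqueue.  Every failure funnels into the single
 * err_out label and returns -ENOMEM; the caller is expected to unwind
 * with hisi_sas_free() (see hisi_sas_shost_alloc() below).
 */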
e21fe3a5 1774int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
6be6de18 1775{
11b75249 1776 struct device *dev = hisi_hba->dev;
a8d547bd 1777 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
6be6de18 1778
fa42d80d 1779 spin_lock_init(&hisi_hba->lock);
976867e6
JG
1780 for (i = 0; i < hisi_hba->n_phy; i++) {
1781 hisi_sas_phy_init(hisi_hba, i);
1782 hisi_hba->port[i].port_attached = 0;
1783 hisi_hba->port[i].id = -1;
976867e6
JG
1784 }
1785
af740dbe
JG
1786 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1787 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1788 hisi_hba->devices[i].device_id = i;
1789 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1790 }
1791
6be6de18 1792 for (i = 0; i < hisi_hba->queue_count; i++) {
9101a079 1793 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
4fde02ad 1794 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
9101a079
JG
1795
1796 /* Completion queue structure */
1797 cq->id = i;
1798 cq->hisi_hba = hisi_hba;
1799
4fde02ad 1800 /* Delivery queue structure */
39bade0c 1801 spin_lock_init(&dq->lock);
4fde02ad
JG
1802 dq->id = i;
1803 dq->hisi_hba = hisi_hba;
1804
6be6de18
JG
1805 /* Delivery queue */
1806 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1807 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1808 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1809 if (!hisi_hba->cmd_hdr[i])
1810 goto err_out;
6be6de18
JG
1811
1812 /* Completion queue */
1813 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1814 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1815 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1816 if (!hisi_hba->complete_hdr[i])
1817 goto err_out;
6be6de18
JG
1818 }
1819
f557e32c
XT
1820 s = sizeof(struct hisi_sas_slot_buf_table);
1821 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1822 if (!hisi_hba->buffer_pool)
6be6de18
JG
1823 goto err_out;
1824
1825 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
4f4e21b8 1826 hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
6be6de18
JG
1827 GFP_KERNEL);
1828 if (!hisi_hba->itct)
1829 goto err_out;
1830
a8d547bd 1831 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
6be6de18
JG
1832 sizeof(struct hisi_sas_slot),
1833 GFP_KERNEL);
1834 if (!hisi_hba->slot_info)
1835 goto err_out;
1836
a8d547bd 1837 s = max_command_entries * sizeof(struct hisi_sas_iost);
6be6de18
JG
1838 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1839 GFP_KERNEL);
1840 if (!hisi_hba->iost)
1841 goto err_out;
1842
a8d547bd 1843 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
6be6de18
JG
1844 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1845 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1846 if (!hisi_hba->breakpoint)
1847 goto err_out;
1848
a8d547bd 1849 hisi_hba->slot_index_count = max_command_entries;
433f5696 1850 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
257efd1f
JG
1851 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1852 if (!hisi_hba->slot_index_tags)
1853 goto err_out;
1854
6be6de18
JG
1855 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1856 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1857 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1858 if (!hisi_hba->initial_fis)
1859 goto err_out;
6be6de18 1860
3297ded1 1861 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
6be6de18
JG
1862 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1863 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1864 if (!hisi_hba->sata_breakpoint)
1865 goto err_out;
06ec0fb9 1866 hisi_sas_init_mem(hisi_hba);
6be6de18 1867
257efd1f
JG
1868 hisi_sas_slot_index_init(hisi_hba);
1869
7e9080e1
JG
1870 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1871 if (!hisi_hba->wq) {
1872 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1873 goto err_out;
1874 }
1875
6be6de18
JG
1876 return 0;
1877err_out:
1878 return -ENOMEM;
1879}
e21fe3a5 1880EXPORT_SYMBOL_GPL(hisi_sas_alloc);
6be6de18 1881
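/*
 * hisi_sas_free() - release what hisi_sas_alloc() obtained directly: the
 * coherent DMA rings and tables, the slot buffer dma_pool and the
 * workqueue.  The devm_-managed allocations (slot_info, the tag bitmap,
 * the phy/port pointer arrays set up in hisi_sas_probe()) are released
 * automatically with the device and are intentionally not touched here.
 */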
e21fe3a5 1882void hisi_sas_free(struct hisi_hba *hisi_hba)
89d53322 1883{
11b75249 1884 struct device *dev = hisi_hba->dev;
a8d547bd 1885 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
89d53322
JG
1886
1887 for (i = 0; i < hisi_hba->queue_count; i++) {
1888 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1889 if (hisi_hba->cmd_hdr[i])
1890 dma_free_coherent(dev, s,
1891 hisi_hba->cmd_hdr[i],
1892 hisi_hba->cmd_hdr_dma[i]);
1893
1894 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1895 if (hisi_hba->complete_hdr[i])
1896 dma_free_coherent(dev, s,
1897 hisi_hba->complete_hdr[i],
1898 hisi_hba->complete_hdr_dma[i]);
1899 }
1900
f557e32c 1901 dma_pool_destroy(hisi_hba->buffer_pool);
89d53322
JG
1902
1903 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1904 if (hisi_hba->itct)
1905 dma_free_coherent(dev, s,
1906 hisi_hba->itct, hisi_hba->itct_dma);
1907
a8d547bd 1908 s = max_command_entries * sizeof(struct hisi_sas_iost);
89d53322
JG
1909 if (hisi_hba->iost)
1910 dma_free_coherent(dev, s,
1911 hisi_hba->iost, hisi_hba->iost_dma);
1912
a8d547bd 1913 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
89d53322
JG
1914 if (hisi_hba->breakpoint)
1915 dma_free_coherent(dev, s,
1916 hisi_hba->breakpoint,
1917 hisi_hba->breakpoint_dma);
1918
1919
1920 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1921 if (hisi_hba->initial_fis)
1922 dma_free_coherent(dev, s,
1923 hisi_hba->initial_fis,
1924 hisi_hba->initial_fis_dma);
1925
3297ded1 1926 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
89d53322
JG
1927 if (hisi_hba->sata_breakpoint)
1928 dma_free_coherent(dev, s,
1929 hisi_hba->sata_breakpoint,
1930 hisi_hba->sata_breakpoint_dma);
1931
7e9080e1
JG
1932 if (hisi_hba->wq)
1933 destroy_workqueue(hisi_hba->wq);
89d53322 1934}
e21fe3a5 1935EXPORT_SYMBOL_GPL(hisi_sas_free);
6be6de18 1936
b4241f0f 1937void hisi_sas_rst_work_handler(struct work_struct *work)
06ec0fb9
XC
1938{
1939 struct hisi_hba *hisi_hba =
1940 container_of(work, struct hisi_hba, rst_work);
1941
1942 hisi_sas_controller_reset(hisi_hba);
1943}
b4241f0f 1944EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
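/*
 * Illustrative sketch (not part of this file): rst_work is initialised with
 * this handler in hisi_sas_shost_alloc(), so a fire-and-forget reset only
 * needs to queue it on the HBA workqueue:
 */
#if 0
	/* from some hw fatal-error interrupt context: */
	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
#endif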
06ec0fb9 1945
e402acdb
XT
1946void hisi_sas_sync_rst_work_handler(struct work_struct *work)
1947{
1948 struct hisi_sas_rst *rst =
1949 container_of(work, struct hisi_sas_rst, work);
1950
1951 if (!hisi_sas_controller_reset(rst->hisi_hba))
1952 rst->done = true;
1953 complete(rst->completion);
1954}
1955EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
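/*
 * Illustrative sketch (not part of this file): a caller that must know the
 * outcome builds a hisi_sas_rst on the stack, queues it with the handler
 * above and waits on the completion; rst.done then reports whether the
 * controller reset succeeded.  Field usage mirrors the handler above; the
 * wrapper name is hypothetical.
 */
#if 0
static bool example_sync_controller_reset(struct hisi_hba *hisi_hba)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct hisi_sas_rst rst = {
		.hisi_hba = hisi_hba,
		.completion = &done,
		.done = false,
	};

	INIT_WORK_ONSTACK(&rst.work, hisi_sas_sync_rst_work_handler);
	queue_work(hisi_hba->wq, &rst.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&rst.work);

	return rst.done;
}
#endif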
1956
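/*
 * hisi_sas_get_fw_info() - read the firmware-provided controller
 * description.  "sas-addr", "phy-count" and "queue-count" are required on
 * both DT and ACPI systems; the "hisilicon,sas-syscon" phandle and the
 * ctrl-reset-reg/ctrl-reset-sts-reg/ctrl-clock-ena-reg offsets are only
 * required for DT-based platform devices.  The reference clock is
 * optional; when present its rate is cached in MHz.
 */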
0fa24c19 1957int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
7eb7869f 1958{
0fa24c19
JG
1959 struct device *dev = hisi_hba->dev;
1960 struct platform_device *pdev = hisi_hba->platform_dev;
1961 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
3bc45af8 1962 struct clk *refclk;
7eb7869f 1963
4d558c77 1964 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
0fa24c19
JG
1965 SAS_ADDR_SIZE)) {
1966 dev_err(dev, "could not get property sas-addr\n");
1967 return -ENOENT;
1968 }
e26b2f40 1969
4d558c77 1970 if (np) {
0fa24c19
JG
1971 /*
1972 * These properties are only required for platform device-based
1973 * controller with DT firmware.
1974 */
4d558c77
JG
1975 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1976 "hisilicon,sas-syscon");
0fa24c19
JG
1977 if (IS_ERR(hisi_hba->ctrl)) {
1978 dev_err(dev, "could not get syscon\n");
1979 return -ENOENT;
1980 }
e26b2f40 1981
4d558c77 1982 if (device_property_read_u32(dev, "ctrl-reset-reg",
0fa24c19
JG
1983 &hisi_hba->ctrl_reset_reg)) {
1984 dev_err(dev,
1985 "could not get property ctrl-reset-reg\n");
1986 return -ENOENT;
1987 }
e26b2f40 1988
4d558c77 1989 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
0fa24c19
JG
1990 &hisi_hba->ctrl_reset_sts_reg)) {
1991 dev_err(dev,
1992 "could not get property ctrl-reset-sts-reg\n");
1993 return -ENOENT;
1994 }
e26b2f40 1995
4d558c77 1996 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
0fa24c19
JG
1997 &hisi_hba->ctrl_clock_ena_reg)) {
1998 dev_err(dev,
1999 "could not get property ctrl-clock-ena-reg\n");
2000 return -ENOENT;
2001 }
4d558c77
JG
2002 }
2003
0fa24c19 2004 refclk = devm_clk_get(dev, NULL);
3bc45af8 2005 if (IS_ERR(refclk))
87e287c1 2006 dev_dbg(dev, "no ref clk property\n");
3bc45af8
JG
2007 else
2008 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2009
0fa24c19
JG
2010 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2011 dev_err(dev, "could not get property phy-count\n");
2012 return -ENOENT;
2013 }
e26b2f40 2014
4d558c77 2015 if (device_property_read_u32(dev, "queue-count",
0fa24c19
JG
2016 &hisi_hba->queue_count)) {
2017 dev_err(dev, "could not get property queue-count\n");
2018 return -ENOENT;
2019 }
2020
2021 return 0;
2022}
2023EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2024
2025static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2026 const struct hisi_sas_hw *hw)
2027{
2028 struct resource *res;
2029 struct Scsi_Host *shost;
2030 struct hisi_hba *hisi_hba;
2031 struct device *dev = &pdev->dev;
2032
e21fe3a5 2033 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
0fa24c19
JG
2034 if (!shost) {
2035 dev_err(dev, "scsi host alloc failed\n");
2036 return NULL;
2037 }
2038 hisi_hba = shost_priv(shost);
2039
2040 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2041 hisi_hba->hw = hw;
2042 hisi_hba->dev = dev;
2043 hisi_hba->platform_dev = pdev;
2044 hisi_hba->shost = shost;
2045 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2046
77570eed 2047 timer_setup(&hisi_hba->timer, NULL, 0);
0fa24c19
JG
2048
2049 if (hisi_sas_get_fw_info(hisi_hba) < 0)
e26b2f40
JG
2050 goto err_out;
2051
a6f2c7ff
JG
2052 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2053 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2054 dev_err(dev, "No usable DMA addressing method\n");
2055 goto err_out;
2056 }
2057
e26b2f40
JG
2058 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2059 hisi_hba->regs = devm_ioremap_resource(dev, res);
2060 if (IS_ERR(hisi_hba->regs))
2061 goto err_out;
2062
6379c560
XT
2063 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2064 if (res) {
2065 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2066 if (IS_ERR(hisi_hba->sgpio_regs))
2067 goto err_out;
2068 }
2069
89d53322
JG
2070 if (hisi_sas_alloc(hisi_hba, shost)) {
2071 hisi_sas_free(hisi_hba);
6be6de18 2072 goto err_out;
89d53322 2073 }
6be6de18 2074
7eb7869f
JG
2075 return shost;
2076err_out:
76aae5f6 2077 scsi_host_put(shost);
7eb7869f
JG
2078 dev_err(dev, "shost alloc failed\n");
2079 return NULL;
2080}
2081
e21fe3a5 2082void hisi_sas_init_add(struct hisi_hba *hisi_hba)
5d74242e
JG
2083{
2084 int i;
2085
2086 for (i = 0; i < hisi_hba->n_phy; i++)
2087 memcpy(&hisi_hba->phy[i].dev_sas_addr,
2088 hisi_hba->sas_addr,
2089 SAS_ADDR_SIZE);
2090}
e21fe3a5 2091EXPORT_SYMBOL_GPL(hisi_sas_init_add);
5d74242e 2092
7eb7869f
JG
2093int hisi_sas_probe(struct platform_device *pdev,
2094 const struct hisi_sas_hw *hw)
2095{
2096 struct Scsi_Host *shost;
2097 struct hisi_hba *hisi_hba;
2098 struct device *dev = &pdev->dev;
2099 struct asd_sas_phy **arr_phy;
2100 struct asd_sas_port **arr_port;
2101 struct sas_ha_struct *sha;
2102 int rc, phy_nr, port_nr, i;
2103
2104 shost = hisi_sas_shost_alloc(pdev, hw);
d37a0082
XT
2105 if (!shost)
2106 return -ENOMEM;
7eb7869f
JG
2107
2108 sha = SHOST_TO_SAS_HA(shost);
2109 hisi_hba = shost_priv(shost);
2110 platform_set_drvdata(pdev, sha);
50cb916f 2111
7eb7869f
JG
2112 phy_nr = port_nr = hisi_hba->n_phy;
2113
2114 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2115 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
d37a0082
XT
2116 if (!arr_phy || !arr_port) {
2117 rc = -ENOMEM;
2118 goto err_out_ha;
2119 }
7eb7869f
JG
2120
2121 sha->sas_phy = arr_phy;
2122 sha->sas_port = arr_port;
7eb7869f
JG
2123 sha->lldd_ha = hisi_hba;
2124
2125 shost->transportt = hisi_sas_stt;
2126 shost->max_id = HISI_SAS_MAX_DEVICES;
2127 shost->max_lun = ~0;
2128 shost->max_channel = 1;
2129 shost->max_cmd_len = 16;
2130 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
a8d547bd
JG
2131 shost->can_queue = hisi_hba->hw->max_command_entries;
2132 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
7eb7869f
JG
2133
2134 sha->sas_ha_name = DRV_NAME;
11b75249 2135 sha->dev = hisi_hba->dev;
7eb7869f
JG
2136 sha->lldd_module = THIS_MODULE;
2137 sha->sas_addr = &hisi_hba->sas_addr[0];
2138 sha->num_phys = hisi_hba->n_phy;
2139 sha->core.shost = hisi_hba->shost;
2140
2141 for (i = 0; i < hisi_hba->n_phy; i++) {
2142 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2143 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2144 }
2145
5d74242e
JG
2146 hisi_sas_init_add(hisi_hba);
2147
7eb7869f
JG
2148 rc = scsi_add_host(shost, &pdev->dev);
2149 if (rc)
2150 goto err_out_ha;
2151
2152 rc = sas_register_ha(sha);
2153 if (rc)
2154 goto err_out_register_ha;
2155
0757f041
XC
2156 rc = hisi_hba->hw->hw_init(hisi_hba);
2157 if (rc)
2158 goto err_out_register_ha;
2159
7eb7869f
JG
2160 scsi_scan_host(shost);
2161
2162 return 0;
2163
2164err_out_register_ha:
2165 scsi_remove_host(shost);
2166err_out_ha:
d37a0082 2167 hisi_sas_free(hisi_hba);
76aae5f6 2168 scsi_host_put(shost);
7eb7869f
JG
2169 return rc;
2170}
2171EXPORT_SYMBOL_GPL(hisi_sas_probe);
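/*
 * Illustrative sketch (not part of this file): a hw-specific front end
 * wires its hisi_sas_hw ops into the common probe/remove helpers from an
 * ordinary platform driver.  The ops structure, compatible string and
 * driver name below are hypothetical.
 */
#if 0
static int example_hw_probe(struct platform_device *pdev)
{
	return hisi_sas_probe(pdev, &example_hw_ops);
}

static const struct of_device_id example_hw_of_match[] = {
	{ .compatible = "example,hisi-sas-vN" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_hw_of_match);

static struct platform_driver example_hw_driver = {
	.probe		= example_hw_probe,
	.remove		= hisi_sas_remove,
	.driver = {
		.name		= "example-hisi-sas-vN-hw",
		.of_match_table	= example_hw_of_match,
	},
};
module_platform_driver(example_hw_driver);
#endif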
2172
89d53322
JG
2173int hisi_sas_remove(struct platform_device *pdev)
2174{
2175 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2176 struct hisi_hba *hisi_hba = sha->lldd_ha;
d37a0082 2177 struct Scsi_Host *shost = sha->core.shost;
89d53322 2178
5df41af4
XC
2179 if (timer_pending(&hisi_hba->timer))
2180 del_timer(&hisi_hba->timer);
2181
89d53322
JG
2182 sas_unregister_ha(sha);
2183 sas_remove_host(sha->core.shost);
2184
2185 hisi_sas_free(hisi_hba);
76aae5f6 2186 scsi_host_put(shost);
89d53322
JG
2187 return 0;
2188}
2189EXPORT_SYMBOL_GPL(hisi_sas_remove);
2190
e8899fad
JG
2191static __init int hisi_sas_init(void)
2192{
e8899fad
JG
2193 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2194 if (!hisi_sas_stt)
2195 return -ENOMEM;
2196
2197 return 0;
2198}
2199
2200static __exit void hisi_sas_exit(void)
2201{
2202 sas_release_transport(hisi_sas_stt);
2203}
2204
2205module_init(hisi_sas_init);
2206module_exit(hisi_sas_exit);
2207
e8899fad
JG
2208MODULE_LICENSE("GPL");
2209MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2210MODULE_DESCRIPTION("HISILICON SAS controller driver");
2211MODULE_ALIAS("platform:" DRV_NAME);