]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/hisi_sas/hisi_sas_main.c
UBUNTU: Ubuntu-4.13.0-45.50
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
CommitLineData
e8899fad
JG
1/*
2 * Copyright (c) 2015 Linaro Ltd.
3 * Copyright (c) 2015 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 */
11
12#include "hisi_sas.h"
13#define DRV_NAME "hisi_sas"
14
42e7a693
JG
15#define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
cac9b2a2
JG
18static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 u8 *lun, struct hisi_sas_tmf_task *tmf);
441c2740
JG
20static int
21hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 struct domain_device *device,
23 int abort_flag, int tag);
7c594f04 24static int hisi_sas_softreset_ata_disk(struct domain_device *device);
cac9b2a2 25
6c7bb8a1
XC
26u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
27{
28 switch (cmd) {
29 case ATA_CMD_FPDMA_WRITE:
30 case ATA_CMD_FPDMA_READ:
31 case ATA_CMD_FPDMA_RECV:
32 case ATA_CMD_FPDMA_SEND:
33 case ATA_CMD_NCQ_NON_DATA:
34 return HISI_SAS_SATA_PROTOCOL_FPDMA;
35
36 case ATA_CMD_DOWNLOAD_MICRO:
37 case ATA_CMD_ID_ATA:
38 case ATA_CMD_PMP_READ:
39 case ATA_CMD_READ_LOG_EXT:
40 case ATA_CMD_PIO_READ:
41 case ATA_CMD_PIO_READ_EXT:
42 case ATA_CMD_PMP_WRITE:
43 case ATA_CMD_WRITE_LOG_EXT:
44 case ATA_CMD_PIO_WRITE:
45 case ATA_CMD_PIO_WRITE_EXT:
46 return HISI_SAS_SATA_PROTOCOL_PIO;
47
48 case ATA_CMD_DSM:
49 case ATA_CMD_DOWNLOAD_MICRO_DMA:
50 case ATA_CMD_PMP_READ_DMA:
51 case ATA_CMD_PMP_WRITE_DMA:
52 case ATA_CMD_READ:
53 case ATA_CMD_READ_EXT:
54 case ATA_CMD_READ_LOG_DMA_EXT:
55 case ATA_CMD_READ_STREAM_DMA_EXT:
56 case ATA_CMD_TRUSTED_RCV_DMA:
57 case ATA_CMD_TRUSTED_SND_DMA:
58 case ATA_CMD_WRITE:
59 case ATA_CMD_WRITE_EXT:
60 case ATA_CMD_WRITE_FUA_EXT:
61 case ATA_CMD_WRITE_QUEUED:
62 case ATA_CMD_WRITE_LOG_DMA_EXT:
63 case ATA_CMD_WRITE_STREAM_DMA_EXT:
ab2afcbc 64 case ATA_CMD_ZAC_MGMT_IN:
6c7bb8a1
XC
65 return HISI_SAS_SATA_PROTOCOL_DMA;
66
67 case ATA_CMD_CHK_POWER:
68 case ATA_CMD_DEV_RESET:
69 case ATA_CMD_EDD:
70 case ATA_CMD_FLUSH:
71 case ATA_CMD_FLUSH_EXT:
72 case ATA_CMD_VERIFY:
73 case ATA_CMD_VERIFY_EXT:
74 case ATA_CMD_SET_FEATURES:
75 case ATA_CMD_STANDBY:
76 case ATA_CMD_STANDBYNOW1:
ab2afcbc 77 case ATA_CMD_ZAC_MGMT_OUT:
6c7bb8a1
XC
78 return HISI_SAS_SATA_PROTOCOL_NONDATA;
79 default:
80 if (direction == DMA_NONE)
81 return HISI_SAS_SATA_PROTOCOL_NONDATA;
82 return HISI_SAS_SATA_PROTOCOL_PIO;
83 }
84}
85EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
86
75904077
XC
87void hisi_sas_sata_done(struct sas_task *task,
88 struct hisi_sas_slot *slot)
89{
90 struct task_status_struct *ts = &task->task_status;
91 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
f557e32c
XT
92 struct hisi_sas_status_buffer *status_buf =
93 hisi_sas_status_buf_addr_mem(slot);
94 u8 *iu = &status_buf->iu[0];
95 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
75904077
XC
96
97 resp->frame_len = sizeof(struct dev_to_host_fis);
98 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
99
100 ts->buf_valid_size = sizeof(*resp);
101}
102EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
103
318913c6
XC
104int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
105{
106 struct ata_queued_cmd *qc = task->uldd_task;
107
108 if (qc) {
109 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
110 qc->tf.command == ATA_CMD_FPDMA_READ) {
111 *tag = qc->tag;
112 return 1;
113 }
114 }
115 return 0;
116}
117EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
118
42e7a693
JG
119static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
120{
121 return device->port->ha->lldd_ha;
122}
123
2e244f0f
JG
124struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
125{
126 return container_of(sas_port, struct hisi_sas_port, sas_port);
127}
128EXPORT_SYMBOL_GPL(to_hisi_sas_port);
129
257efd1f
JG
130static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
131{
132 void *bitmap = hisi_hba->slot_index_tags;
133
134 clear_bit(slot_idx, bitmap);
135}
136
42e7a693
JG
137static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
138{
139 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
140}
141
142static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
143{
144 void *bitmap = hisi_hba->slot_index_tags;
145
146 set_bit(slot_idx, bitmap);
147}
148
149static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
150{
151 unsigned int index;
152 void *bitmap = hisi_hba->slot_index_tags;
153
154 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
155 if (index >= hisi_hba->slot_index_count)
156 return -SAS_QUEUE_FULL;
157 hisi_sas_slot_index_set(hisi_hba, index);
158 *slot_idx = index;
159 return 0;
160}
161
257efd1f
JG
162static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
163{
164 int i;
165
166 for (i = 0; i < hisi_hba->slot_index_count; ++i)
167 hisi_sas_slot_index_clear(hisi_hba, i);
168}
27a3f229
JG
169
170void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
171 struct hisi_sas_slot *slot)
172{
27a3f229 173
d3c4dd4e 174 if (task) {
11b75249 175 struct device *dev = hisi_hba->dev;
d3c4dd4e
JG
176 struct domain_device *device = task->dev;
177 struct hisi_sas_device *sas_dev = device->lldd_dev;
27a3f229 178
03ab5334
XT
179 if (!task->lldd_task)
180 return;
181
182 task->lldd_task = NULL;
183
d3c4dd4e
JG
184 if (!sas_protocol_ata(task->task_proto))
185 if (slot->n_elem)
186 dma_unmap_sg(dev, task->scatter, slot->n_elem,
187 task->data_dir);
188
d3c4dd4e
JG
189 if (sas_dev)
190 atomic64_dec(&sas_dev->running_req);
191 }
27a3f229 192
f557e32c
XT
193 if (slot->buf)
194 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
27a3f229 195
27a3f229 196 list_del_init(&slot->entry);
03ab5334 197 slot->buf = NULL;
27a3f229
JG
198 slot->task = NULL;
199 slot->port = NULL;
200 hisi_sas_slot_index_free(hisi_hba, slot->idx);
d3c4dd4e 201
59ba49f9 202 /* slot memory is fully zeroed when it is reused */
27a3f229
JG
203}
204EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
205
66ee999b
JG
206static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
207 struct hisi_sas_slot *slot)
208{
209 return hisi_hba->hw->prep_smp(hisi_hba, slot);
210}
211
42e7a693
JG
212static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
213 struct hisi_sas_slot *slot, int is_tmf,
214 struct hisi_sas_tmf_task *tmf)
215{
216 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
217}
218
6f2ff1a1
JG
219static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
220 struct hisi_sas_slot *slot)
221{
222 return hisi_hba->hw->prep_stp(hisi_hba, slot);
223}
224
441c2740
JG
225static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
226 struct hisi_sas_slot *slot,
227 int device_id, int abort_flag, int tag_to_abort)
228{
229 return hisi_hba->hw->prep_abort(hisi_hba, slot,
230 device_id, abort_flag, tag_to_abort);
231}
232
cac9b2a2
JG
233/*
234 * This function will issue an abort TMF regardless of whether the
235 * task is in the sdev or not. Then it will do the task complete
236 * cleanup and callbacks.
237 */
238static void hisi_sas_slot_abort(struct work_struct *work)
239{
240 struct hisi_sas_slot *abort_slot =
241 container_of(work, struct hisi_sas_slot, abort_slot);
242 struct sas_task *task = abort_slot->task;
243 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
244 struct scsi_cmnd *cmnd = task->uldd_task;
245 struct hisi_sas_tmf_task tmf_task;
cac9b2a2 246 struct scsi_lun lun;
11b75249 247 struct device *dev = hisi_hba->dev;
cac9b2a2 248 int tag = abort_slot->idx;
da7b66e7 249 unsigned long flags;
cac9b2a2
JG
250
251 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
252 dev_err(dev, "cannot abort slot for non-ssp task\n");
253 goto out;
254 }
255
256 int_to_scsilun(cmnd->device->lun, &lun);
257 tmf_task.tmf = TMF_ABORT_TASK;
258 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
259
260 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
261out:
262 /* Do cleanup for this task */
da7b66e7 263 spin_lock_irqsave(&hisi_hba->lock, flags);
cac9b2a2 264 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
da7b66e7 265 spin_unlock_irqrestore(&hisi_hba->lock, flags);
cac9b2a2
JG
266 if (task->task_done)
267 task->task_done(task);
cac9b2a2
JG
268}
269
b1a49412
XC
270static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
271 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
272 int *pass)
42e7a693 273{
b1a49412 274 struct hisi_hba *hisi_hba = dq->hisi_hba;
42e7a693
JG
275 struct domain_device *device = task->dev;
276 struct hisi_sas_device *sas_dev = device->lldd_dev;
277 struct hisi_sas_port *port;
278 struct hisi_sas_slot *slot;
279 struct hisi_sas_cmd_hdr *cmd_hdr_base;
2e244f0f 280 struct asd_sas_port *sas_port = device->port;
11b75249 281 struct device *dev = hisi_hba->dev;
42e7a693 282 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
54c9dd2d 283 unsigned long flags;
42e7a693 284
2e244f0f 285 if (!sas_port) {
42e7a693
JG
286 struct task_status_struct *ts = &task->task_status;
287
288 ts->resp = SAS_TASK_UNDELIVERED;
289 ts->stat = SAS_PHY_DOWN;
290 /*
291 * libsas will use dev->port, should
292 * not call task_done for sata
293 */
294 if (device->dev_type != SAS_SATA_DEV)
295 task->task_done(task);
ddabca21 296 return SAS_PHY_DOWN;
42e7a693
JG
297 }
298
299 if (DEV_IS_GONE(sas_dev)) {
300 if (sas_dev)
ad604832 301 dev_info(dev, "task prep: device %d not ready\n",
42e7a693
JG
302 sas_dev->device_id);
303 else
304 dev_info(dev, "task prep: device %016llx not ready\n",
305 SAS_ADDR(device->sas_addr));
306
ddabca21 307 return SAS_PHY_DOWN;
42e7a693 308 }
2e244f0f
JG
309
310 port = to_hisi_sas_port(sas_port);
9859f24e 311 if (port && !port->port_attached) {
09fe9ecb 312 dev_info(dev, "task prep: %s port%d not attach device\n",
6073b771 313 (dev_is_sata(device)) ?
09fe9ecb
JG
314 "SATA/STP" : "SAS",
315 device->port->id);
316
317 return SAS_PHY_DOWN;
42e7a693
JG
318 }
319
320 if (!sas_protocol_ata(task->task_proto)) {
321 if (task->num_scatter) {
322 n_elem = dma_map_sg(dev, task->scatter,
323 task->num_scatter, task->data_dir);
324 if (!n_elem) {
325 rc = -ENOMEM;
326 goto prep_out;
327 }
328 }
329 } else
330 n_elem = task->num_scatter;
331
b1a49412 332 spin_lock_irqsave(&hisi_hba->lock, flags);
685b6d6e
JG
333 if (hisi_hba->hw->slot_index_alloc)
334 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
335 device);
336 else
337 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
b1a49412
XC
338 if (rc) {
339 spin_unlock_irqrestore(&hisi_hba->lock, flags);
42e7a693 340 goto err_out;
b1a49412
XC
341 }
342 spin_unlock_irqrestore(&hisi_hba->lock, flags);
343
344 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
42e7a693
JG
345 if (rc)
346 goto err_out_tag;
347
b1a49412
XC
348 dlvry_queue = dq->id;
349 dlvry_queue_slot = dq->wr_point;
42e7a693
JG
350 slot = &hisi_hba->slot_info[slot_idx];
351 memset(slot, 0, sizeof(struct hisi_sas_slot));
352
353 slot->idx = slot_idx;
354 slot->n_elem = n_elem;
355 slot->dlvry_queue = dlvry_queue;
356 slot->dlvry_queue_slot = dlvry_queue_slot;
357 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
358 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
359 slot->task = task;
360 slot->port = port;
361 task->lldd_task = slot;
cac9b2a2 362 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
42e7a693 363
f557e32c
XT
364 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
365 GFP_ATOMIC, &slot->buf_dma);
366 if (!slot->buf) {
9c9d18e7 367 rc = -ENOMEM;
42e7a693 368 goto err_out_slot_buf;
9c9d18e7 369 }
42e7a693 370 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
f557e32c
XT
371 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
372 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
42e7a693
JG
373
374 switch (task->task_proto) {
66ee999b
JG
375 case SAS_PROTOCOL_SMP:
376 rc = hisi_sas_task_prep_smp(hisi_hba, slot);
377 break;
42e7a693
JG
378 case SAS_PROTOCOL_SSP:
379 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
380 break;
381 case SAS_PROTOCOL_SATA:
382 case SAS_PROTOCOL_STP:
383 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
6f2ff1a1
JG
384 rc = hisi_sas_task_prep_ata(hisi_hba, slot);
385 break;
42e7a693
JG
386 default:
387 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
388 task->task_proto);
389 rc = -EINVAL;
390 break;
391 }
392
393 if (rc) {
394 dev_err(dev, "task prep: rc = 0x%x\n", rc);
f557e32c 395 goto err_out_buf;
42e7a693
JG
396 }
397
405314df 398 list_add_tail(&slot->entry, &sas_dev->list);
54c9dd2d 399 spin_lock_irqsave(&task->task_state_lock, flags);
42e7a693 400 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
54c9dd2d 401 spin_unlock_irqrestore(&task->task_state_lock, flags);
42e7a693 402
b1a49412 403 dq->slot_prep = slot;
42e7a693 404
f696cc32 405 atomic64_inc(&sas_dev->running_req);
42e7a693
JG
406 ++(*pass);
407
9c9d18e7 408 return 0;
42e7a693 409
f557e32c
XT
410err_out_buf:
411 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
412 slot->buf_dma);
42e7a693
JG
413err_out_slot_buf:
414 /* Nothing to be done */
415err_out_tag:
b1a49412 416 spin_lock_irqsave(&hisi_hba->lock, flags);
42e7a693 417 hisi_sas_slot_index_free(hisi_hba, slot_idx);
b1a49412 418 spin_unlock_irqrestore(&hisi_hba->lock, flags);
42e7a693
JG
419err_out:
420 dev_err(dev, "task prep: failed[%d]!\n", rc);
421 if (!sas_protocol_ata(task->task_proto))
422 if (n_elem)
423 dma_unmap_sg(dev, task->scatter, n_elem,
424 task->data_dir);
425prep_out:
426 return rc;
427}
428
429static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
430 int is_tmf, struct hisi_sas_tmf_task *tmf)
431{
432 u32 rc;
433 u32 pass = 0;
434 unsigned long flags;
435 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
11b75249 436 struct device *dev = hisi_hba->dev;
b1a49412
XC
437 struct domain_device *device = task->dev;
438 struct hisi_sas_device *sas_dev = device->lldd_dev;
439 struct hisi_sas_dq *dq = sas_dev->dq;
42e7a693 440
689ad4fb 441 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
06ec0fb9
XC
442 return -EINVAL;
443
42e7a693 444 /* protect task_prep and start_delivery sequence */
b1a49412
XC
445 spin_lock_irqsave(&dq->lock, flags);
446 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
42e7a693
JG
447 if (rc)
448 dev_err(dev, "task exec: failed[%d]!\n", rc);
449
450 if (likely(pass))
b1a49412
XC
451 hisi_hba->hw->start_delivery(dq);
452 spin_unlock_irqrestore(&dq->lock, flags);
42e7a693
JG
453
454 return rc;
455}
257efd1f 456
66139921
JG
457static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
458{
459 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
460 struct asd_sas_phy *sas_phy = &phy->sas_phy;
461 struct sas_ha_struct *sas_ha;
462
463 if (!phy->phy_attached)
464 return;
465
466 sas_ha = &hisi_hba->sha;
467 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
468
469 if (sas_phy->phy) {
470 struct sas_phy *sphy = sas_phy->phy;
471
472 sphy->negotiated_linkrate = sas_phy->linkrate;
66139921 473 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
2ae75787
XC
474 sphy->maximum_linkrate_hw =
475 hisi_hba->hw->phy_get_max_linkrate();
476 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
477 sphy->minimum_linkrate = phy->minimum_linkrate;
478
479 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
480 sphy->maximum_linkrate = phy->maximum_linkrate;
66139921
JG
481 }
482
483 if (phy->phy_type & PORT_TYPE_SAS) {
484 struct sas_identify_frame *id;
485
486 id = (struct sas_identify_frame *)phy->frame_rcvd;
487 id->dev_type = phy->identify.device_type;
488 id->initiator_bits = SAS_PROTOCOL_ALL;
489 id->target_bits = phy->identify.target_port_protocols;
490 } else if (phy->phy_type & PORT_TYPE_SATA) {
491 /*Nothing*/
492 }
493
494 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
495 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
496}
497
abda97c2
JG
498static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
499{
500 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
501 struct hisi_sas_device *sas_dev = NULL;
502 int i;
503
504 spin_lock(&hisi_hba->lock);
505 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
506 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
b1a49412
XC
507 int queue = i % hisi_hba->queue_count;
508 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
509
abda97c2
JG
510 hisi_hba->devices[i].device_id = i;
511 sas_dev = &hisi_hba->devices[i];
512 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
513 sas_dev->dev_type = device->dev_type;
514 sas_dev->hisi_hba = hisi_hba;
515 sas_dev->sas_device = device;
b1a49412 516 sas_dev->dq = dq;
405314df 517 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
abda97c2
JG
518 break;
519 }
520 }
521 spin_unlock(&hisi_hba->lock);
522
523 return sas_dev;
524}
525
526static int hisi_sas_dev_found(struct domain_device *device)
527{
528 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
529 struct domain_device *parent_dev = device->parent;
530 struct hisi_sas_device *sas_dev;
11b75249 531 struct device *dev = hisi_hba->dev;
abda97c2 532
685b6d6e
JG
533 if (hisi_hba->hw->alloc_dev)
534 sas_dev = hisi_hba->hw->alloc_dev(device);
535 else
536 sas_dev = hisi_sas_alloc_dev(device);
abda97c2
JG
537 if (!sas_dev) {
538 dev_err(dev, "fail alloc dev: max support %d devices\n",
539 HISI_SAS_MAX_DEVICES);
540 return -EINVAL;
541 }
542
543 device->lldd_dev = sas_dev;
544 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
545
546 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
547 int phy_no;
548 u8 phy_num = parent_dev->ex_dev.num_phys;
549 struct ex_phy *phy;
550
551 for (phy_no = 0; phy_no < phy_num; phy_no++) {
552 phy = &parent_dev->ex_dev.ex_phy[phy_no];
553 if (SAS_ADDR(phy->attached_sas_addr) ==
554 SAS_ADDR(device->sas_addr)) {
555 sas_dev->attached_phy = phy_no;
556 break;
557 }
558 }
559
560 if (phy_no == phy_num) {
561 dev_info(dev, "dev found: no attached "
562 "dev:%016llx at ex:%016llx\n",
563 SAS_ADDR(device->sas_addr),
564 SAS_ADDR(parent_dev->sas_addr));
565 return -EINVAL;
566 }
567 }
568
569 return 0;
570}
571
31eec8a6
JG
572static int hisi_sas_slave_configure(struct scsi_device *sdev)
573{
574 struct domain_device *dev = sdev_to_domain_dev(sdev);
575 int ret = sas_slave_configure(sdev);
576
577 if (ret)
578 return ret;
579 if (!dev_is_sata(dev))
580 sas_change_queue_depth(sdev, 64);
581
582 return 0;
583}
584
701f75ec
JG
585static void hisi_sas_scan_start(struct Scsi_Host *shost)
586{
587 struct hisi_hba *hisi_hba = shost_priv(shost);
701f75ec 588
396b8044 589 hisi_hba->hw->phys_init(hisi_hba);
701f75ec
JG
590}
591
592static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
593{
594 struct hisi_hba *hisi_hba = shost_priv(shost);
595 struct sas_ha_struct *sha = &hisi_hba->sha;
596
396b8044
JG
597 /* Wait for PHY up interrupt to occur */
598 if (time < HZ)
701f75ec
JG
599 return 0;
600
601 sas_drain_work(sha);
602 return 1;
603}
604
66139921
JG
605static void hisi_sas_phyup_work(struct work_struct *work)
606{
607 struct hisi_sas_phy *phy =
608 container_of(work, struct hisi_sas_phy, phyup_ws);
609 struct hisi_hba *hisi_hba = phy->hisi_hba;
610 struct asd_sas_phy *sas_phy = &phy->sas_phy;
611 int phy_no = sas_phy->id;
612
613 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
614 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
615}
976867e6
JG
616
617static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
618{
619 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
620 struct asd_sas_phy *sas_phy = &phy->sas_phy;
621
622 phy->hisi_hba = hisi_hba;
623 phy->port = NULL;
624 init_timer(&phy->timer);
625 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
626 sas_phy->class = SAS;
627 sas_phy->iproto = SAS_PROTOCOL_ALL;
628 sas_phy->tproto = 0;
629 sas_phy->type = PHY_TYPE_PHYSICAL;
630 sas_phy->role = PHY_ROLE_INITIATOR;
631 sas_phy->oob_mode = OOB_NOT_CONNECTED;
632 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
633 sas_phy->id = phy_no;
634 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
635 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
636 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
637 sas_phy->lldd_phy = phy;
66139921
JG
638
639 INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
976867e6
JG
640}
641
184a4635
JG
642static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
643{
644 struct sas_ha_struct *sas_ha = sas_phy->ha;
645 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
646 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
647 struct asd_sas_port *sas_port = sas_phy->port;
2e244f0f 648 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
184a4635
JG
649 unsigned long flags;
650
651 if (!sas_port)
652 return;
653
654 spin_lock_irqsave(&hisi_hba->lock, flags);
655 port->port_attached = 1;
656 port->id = phy->port_id;
657 phy->port = port;
658 sas_port->lldd_port = port;
659 spin_unlock_irqrestore(&hisi_hba->lock, flags);
660}
661
d3c4dd4e 662static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
405314df 663 struct hisi_sas_slot *slot)
184a4635 664{
d3c4dd4e
JG
665 if (task) {
666 unsigned long flags;
667 struct task_status_struct *ts;
184a4635 668
d3c4dd4e 669 ts = &task->task_status;
184a4635 670
d3c4dd4e
JG
671 ts->resp = SAS_TASK_COMPLETE;
672 ts->stat = SAS_ABORTED_TASK;
673 spin_lock_irqsave(&task->task_state_lock, flags);
674 task->task_state_flags &=
675 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
676 task->task_state_flags |= SAS_TASK_STATE_DONE;
677 spin_unlock_irqrestore(&task->task_state_lock, flags);
678 }
184a4635 679
405314df 680 hisi_sas_slot_task_free(hisi_hba, task, slot);
184a4635
JG
681}
682
405314df 683/* hisi_hba.lock should be locked */
184a4635
JG
684static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
685 struct domain_device *device)
686{
405314df
JG
687 struct hisi_sas_slot *slot, *slot2;
688 struct hisi_sas_device *sas_dev = device->lldd_dev;
184a4635 689
405314df
JG
690 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
691 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
184a4635
JG
692}
693
06ec0fb9
XC
694static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
695{
405314df
JG
696 struct hisi_sas_device *sas_dev;
697 struct domain_device *device;
06ec0fb9
XC
698 int i;
699
405314df
JG
700 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
701 sas_dev = &hisi_hba->devices[i];
702 device = sas_dev->sas_device;
06ec0fb9 703
405314df
JG
704 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
705 !device)
06ec0fb9 706 continue;
405314df
JG
707
708 hisi_sas_release_task(hisi_hba, device);
06ec0fb9
XC
709 }
710}
711
d30ff263
XC
712static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
713 struct domain_device *device)
714{
715 if (hisi_hba->hw->dereg_device)
716 hisi_hba->hw->dereg_device(hisi_hba, device);
717}
718
abda97c2
JG
719static void hisi_sas_dev_gone(struct domain_device *device)
720{
721 struct hisi_sas_device *sas_dev = device->lldd_dev;
722 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
11b75249 723 struct device *dev = hisi_hba->dev;
ad604832 724 int dev_id = sas_dev->device_id;
abda97c2 725
ad604832 726 dev_info(dev, "found dev[%d:%x] is gone\n",
abda97c2
JG
727 sas_dev->device_id, sas_dev->dev_type);
728
40f2702b
JG
729 hisi_sas_internal_task_abort(hisi_hba, device,
730 HISI_SAS_INT_ABT_DEV, 0);
731
d30ff263
XC
732 hisi_sas_dereg_device(hisi_hba, device);
733
abda97c2
JG
734 hisi_hba->hw->free_device(hisi_hba, sas_dev);
735 device->lldd_dev = NULL;
736 memset(sas_dev, 0, sizeof(*sas_dev));
737 sas_dev->device_id = dev_id;
738 sas_dev->dev_type = SAS_PHY_UNUSED;
739 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
740}
42e7a693
JG
741
742static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
743{
744 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
745}
746
e4189d53
JG
747static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
748 void *funcdata)
749{
750 struct sas_ha_struct *sas_ha = sas_phy->ha;
751 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
752 int phy_no = sas_phy->id;
753
754 switch (func) {
755 case PHY_FUNC_HARD_RESET:
756 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
757 break;
758
759 case PHY_FUNC_LINK_RESET:
b4c67a6c
JG
760 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
761 msleep(100);
a0c341d7 762 hisi_hba->hw->phy_start(hisi_hba, phy_no);
e4189d53
JG
763 break;
764
765 case PHY_FUNC_DISABLE:
766 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
767 break;
768
769 case PHY_FUNC_SET_LINK_RATE:
2ae75787
XC
770 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
771 break;
ce999301
XT
772 case PHY_FUNC_GET_EVENTS:
773 if (hisi_hba->hw->get_events) {
774 hisi_hba->hw->get_events(hisi_hba, phy_no);
775 break;
776 }
777 /* fallthru */
e4189d53
JG
778 case PHY_FUNC_RELEASE_SPINUP_HOLD:
779 default:
780 return -EOPNOTSUPP;
781 }
782 return 0;
783}
184a4635 784
0efff300
JG
785static void hisi_sas_task_done(struct sas_task *task)
786{
787 if (!del_timer(&task->slow_task->timer))
788 return;
789 complete(&task->slow_task->completion);
790}
791
792static void hisi_sas_tmf_timedout(unsigned long data)
793{
794 struct sas_task *task = (struct sas_task *)data;
f64a6988
XC
795 unsigned long flags;
796
797 spin_lock_irqsave(&task->task_state_lock, flags);
798 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
799 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
800 spin_unlock_irqrestore(&task->task_state_lock, flags);
0efff300 801
0efff300
JG
802 complete(&task->slow_task->completion);
803}
804
805#define TASK_TIMEOUT 20
806#define TASK_RETRY 3
807static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
808 void *parameter, u32 para_len,
809 struct hisi_sas_tmf_task *tmf)
810{
811 struct hisi_sas_device *sas_dev = device->lldd_dev;
812 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
11b75249 813 struct device *dev = hisi_hba->dev;
0efff300
JG
814 struct sas_task *task;
815 int res, retry;
816
817 for (retry = 0; retry < TASK_RETRY; retry++) {
818 task = sas_alloc_slow_task(GFP_KERNEL);
819 if (!task)
820 return -ENOMEM;
821
822 task->dev = device;
823 task->task_proto = device->tproto;
824
7c594f04
XC
825 if (dev_is_sata(device)) {
826 task->ata_task.device_control_reg_update = 1;
827 memcpy(&task->ata_task.fis, parameter, para_len);
828 } else {
829 memcpy(&task->ssp_task, parameter, para_len);
830 }
0efff300
JG
831 task->task_done = hisi_sas_task_done;
832
833 task->slow_task->timer.data = (unsigned long) task;
834 task->slow_task->timer.function = hisi_sas_tmf_timedout;
835 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
836 add_timer(&task->slow_task->timer);
837
838 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
839
840 if (res) {
841 del_timer(&task->slow_task->timer);
842 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
843 res);
844 goto ex_err;
845 }
846
847 wait_for_completion(&task->slow_task->completion);
848 res = TMF_RESP_FUNC_FAILED;
849 /* Even TMF timed out, return direct. */
850 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
851 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
d3c4dd4e
JG
852 struct hisi_sas_slot *slot = task->lldd_task;
853
7c594f04 854 dev_err(dev, "abort tmf: TMF task timeout\n");
d3c4dd4e
JG
855 if (slot)
856 slot->task = NULL;
857
0efff300
JG
858 goto ex_err;
859 }
860 }
861
862 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1af1b808 863 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
0efff300
JG
864 res = TMF_RESP_FUNC_COMPLETE;
865 break;
866 }
867
4ffde482
JG
868 if (task->task_status.resp == SAS_TASK_COMPLETE &&
869 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
870 res = TMF_RESP_FUNC_SUCC;
871 break;
872 }
873
0efff300
JG
874 if (task->task_status.resp == SAS_TASK_COMPLETE &&
875 task->task_status.stat == SAS_DATA_UNDERRUN) {
876 /* no error, but return the number of bytes of
877 * underrun
878 */
879 dev_warn(dev, "abort tmf: task to dev %016llx "
880 "resp: 0x%x sts 0x%x underrun\n",
881 SAS_ADDR(device->sas_addr),
882 task->task_status.resp,
883 task->task_status.stat);
884 res = task->task_status.residual;
885 break;
886 }
887
888 if (task->task_status.resp == SAS_TASK_COMPLETE &&
889 task->task_status.stat == SAS_DATA_OVERRUN) {
890 dev_warn(dev, "abort tmf: blocked task error\n");
891 res = -EMSGSIZE;
892 break;
893 }
894
895 dev_warn(dev, "abort tmf: task to dev "
896 "%016llx resp: 0x%x status 0x%x\n",
897 SAS_ADDR(device->sas_addr), task->task_status.resp,
898 task->task_status.stat);
899 sas_free_task(task);
900 task = NULL;
901 }
902ex_err:
d2d7e7a0
XC
903 if (retry == TASK_RETRY)
904 dev_warn(dev, "abort tmf: executing internal task failed!\n");
0efff300
JG
905 sas_free_task(task);
906 return res;
907}
908
7c594f04
XC
909static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
910 bool reset, int pmp, u8 *fis)
911{
912 struct ata_taskfile tf;
913
914 ata_tf_init(dev, &tf);
915 if (reset)
916 tf.ctl |= ATA_SRST;
917 else
918 tf.ctl &= ~ATA_SRST;
919 tf.command = ATA_CMD_DEV_RESET;
920 ata_tf_to_fis(&tf, pmp, 0, fis);
921}
922
923static int hisi_sas_softreset_ata_disk(struct domain_device *device)
924{
925 u8 fis[20] = {0};
926 struct ata_port *ap = device->sata_dev.ap;
927 struct ata_link *link;
928 int rc = TMF_RESP_FUNC_FAILED;
929 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
11b75249 930 struct device *dev = hisi_hba->dev;
7c594f04
XC
931 int s = sizeof(struct host_to_dev_fis);
932 unsigned long flags;
933
934 ata_for_each_link(link, ap, EDGE) {
935 int pmp = sata_srst_pmp(link);
936
937 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
938 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
939 if (rc != TMF_RESP_FUNC_COMPLETE)
940 break;
941 }
942
943 if (rc == TMF_RESP_FUNC_COMPLETE) {
944 ata_for_each_link(link, ap, EDGE) {
945 int pmp = sata_srst_pmp(link);
946
947 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
948 rc = hisi_sas_exec_internal_tmf_task(device, fis,
949 s, NULL);
950 if (rc != TMF_RESP_FUNC_COMPLETE)
951 dev_err(dev, "ata disk de-reset failed\n");
952 }
953 } else {
954 dev_err(dev, "ata disk reset failed\n");
955 }
956
957 if (rc == TMF_RESP_FUNC_COMPLETE) {
958 spin_lock_irqsave(&hisi_hba->lock, flags);
959 hisi_sas_release_task(hisi_hba, device);
960 spin_unlock_irqrestore(&hisi_hba->lock, flags);
961 }
962
963 return rc;
964}
965
0efff300
JG
966static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
967 u8 *lun, struct hisi_sas_tmf_task *tmf)
968{
969 struct sas_ssp_task ssp_task;
970
971 if (!(device->tproto & SAS_PROTOCOL_SSP))
972 return TMF_RESP_FUNC_ESUPP;
973
974 memcpy(ssp_task.LUN, lun, 8);
975
976 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
977 sizeof(ssp_task), tmf);
978}
979
689ad4fb
XT
980static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
981 struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
982{
983 struct hisi_sas_device *sas_dev;
984 struct domain_device *device;
985 int i;
986
987 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
988 sas_dev = &hisi_hba->devices[i];
989 device = sas_dev->sas_device;
990 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
991 || !device || (device->port != sas_port))
992 continue;
993
994 hisi_hba->hw->free_device(hisi_hba, sas_dev);
995
996 /* Update linkrate of directly attached device. */
997 if (!device->parent)
998 device->linkrate = linkrate;
999
1000 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1001 }
1002}
1003
1004static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1005 u32 state)
1006{
1007 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1008 struct asd_sas_port *_sas_port = NULL;
1009 int phy_no;
1010
1011 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1012 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1013 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1014 struct asd_sas_port *sas_port = sas_phy->port;
1015 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1016 bool do_port_check = !!(_sas_port != sas_port);
1017
1018 if (!sas_phy->phy->enabled)
1019 continue;
1020
1021 /* Report PHY state change to libsas */
1022 if (state & (1 << phy_no)) {
1023 if (do_port_check && sas_port) {
1024 struct domain_device *dev = sas_port->port_dev;
1025
1026 _sas_port = sas_port;
1027 port->id = phy->port_id;
1028 hisi_sas_refresh_port_id(hisi_hba,
1029 sas_port, sas_phy->linkrate);
1030
1031 if (DEV_IS_EXPANDER(dev->dev_type))
1032 sas_ha->notify_port_event(sas_phy,
1033 PORTE_BROADCAST_RCVD);
1034 }
1035 } else if (old_state & (1 << phy_no))
1036 /* PHY down but was up before */
1037 hisi_sas_phy_down(hisi_hba, phy_no, 0);
1038
1039 }
1040
1041 drain_workqueue(hisi_hba->shost->work_q);
1042}
1043
06ec0fb9
XC
1044static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1045{
689ad4fb
XT
1046 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1047 struct device *dev = hisi_hba->dev;
1048 struct Scsi_Host *shost = hisi_hba->shost;
1049 u32 old_state, state;
1050 unsigned long flags;
06ec0fb9
XC
1051 int rc;
1052
1053 if (!hisi_hba->hw->soft_reset)
1054 return -1;
1055
689ad4fb
XT
1056 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1057 return -1;
06ec0fb9 1058
689ad4fb
XT
1059 dev_dbg(dev, "controller resetting...\n");
1060 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
06ec0fb9 1061
689ad4fb
XT
1062 scsi_block_requests(shost);
1063 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1064 rc = hisi_hba->hw->soft_reset(hisi_hba);
1065 if (rc) {
1066 dev_warn(dev, "controller reset failed (%d)\n", rc);
1067 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1068 goto out;
1069 }
1070 spin_lock_irqsave(&hisi_hba->lock, flags);
1071 hisi_sas_release_tasks(hisi_hba);
1072 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1073
1074 sas_ha->notify_ha_event(sas_ha, HAE_RESET);
1075 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1076
1077 /* Init and wait for PHYs to come up and all libsas event finished. */
1078 hisi_hba->hw->phys_init(hisi_hba);
1079 msleep(1000);
1080 drain_workqueue(hisi_hba->wq);
1081 drain_workqueue(shost->work_q);
1082
1083 state = hisi_hba->hw->get_phys_state(hisi_hba);
1084 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1085 dev_dbg(dev, "controller reset complete\n");
06ec0fb9
XC
1086
1087out:
689ad4fb 1088 scsi_unblock_requests(shost);
06ec0fb9 1089 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
689ad4fb 1090
06ec0fb9
XC
1091 return rc;
1092}
1093
0efff300
JG
1094static int hisi_sas_abort_task(struct sas_task *task)
1095{
1096 struct scsi_lun lun;
1097 struct hisi_sas_tmf_task tmf_task;
1098 struct domain_device *device = task->dev;
1099 struct hisi_sas_device *sas_dev = device->lldd_dev;
1100 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
11b75249 1101 struct device *dev = hisi_hba->dev;
0efff300
JG
1102 int rc = TMF_RESP_FUNC_FAILED;
1103 unsigned long flags;
1104
1105 if (!sas_dev) {
1106 dev_warn(dev, "Device has been removed\n");
1107 return TMF_RESP_FUNC_FAILED;
1108 }
1109
0efff300 1110 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
0efff300
JG
1111 rc = TMF_RESP_FUNC_COMPLETE;
1112 goto out;
1113 }
1114
0efff300
JG
1115 sas_dev->dev_status = HISI_SAS_DEV_EH;
1116 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1117 struct scsi_cmnd *cmnd = task->uldd_task;
1118 struct hisi_sas_slot *slot = task->lldd_task;
1119 u32 tag = slot->idx;
c35279f2 1120 int rc2;
0efff300
JG
1121
1122 int_to_scsilun(cmnd->device->lun, &lun);
1123 tmf_task.tmf = TMF_ABORT_TASK;
1124 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1125
1126 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1127 &tmf_task);
1128
c35279f2
JG
1129 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1130 HISI_SAS_INT_ABT_CMD, tag);
1131 /*
1132 * If the TMF finds that the IO is not in the device and also
1133 * the internal abort does not succeed, then it is safe to
1134 * free the slot.
1135 * Note: if the internal abort succeeds then the slot
1136 * will have already been completed
1137 */
1138 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
0efff300 1139 if (task->lldd_task) {
0efff300 1140 spin_lock_irqsave(&hisi_hba->lock, flags);
c35279f2 1141 hisi_sas_do_release_task(hisi_hba, task, slot);
0efff300
JG
1142 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1143 }
1144 }
0efff300
JG
1145 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1146 task->task_proto & SAS_PROTOCOL_STP) {
1147 if (task->dev->dev_type == SAS_SATA_DEV) {
dc8a49ca
JG
1148 hisi_sas_internal_task_abort(hisi_hba, device,
1149 HISI_SAS_INT_ABT_DEV, 0);
d30ff263 1150 hisi_sas_dereg_device(hisi_hba, device);
7c594f04 1151 rc = hisi_sas_softreset_ata_disk(device);
0efff300 1152 }
eb045e04 1153 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
dc8a49ca
JG
1154 /* SMP */
1155 struct hisi_sas_slot *slot = task->lldd_task;
1156 u32 tag = slot->idx;
0efff300 1157
ccbfe5a0
XC
1158 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1159 HISI_SAS_INT_ABT_CMD, tag);
289a1f19 1160 if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
ccbfe5a0
XC
1161 spin_lock_irqsave(&hisi_hba->lock, flags);
1162 hisi_sas_do_release_task(hisi_hba, task, slot);
1163 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1164 }
0efff300
JG
1165 }
1166
1167out:
1168 if (rc != TMF_RESP_FUNC_COMPLETE)
1169 dev_notice(dev, "abort task: rc=%d\n", rc);
1170 return rc;
1171}
1172
1173static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1174{
1175 struct hisi_sas_tmf_task tmf_task;
1176 int rc = TMF_RESP_FUNC_FAILED;
1177
1178 tmf_task.tmf = TMF_ABORT_TASK_SET;
1179 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1180
1181 return rc;
1182}
1183
1184static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1185{
1186 int rc = TMF_RESP_FUNC_FAILED;
1187 struct hisi_sas_tmf_task tmf_task;
1188
1189 tmf_task.tmf = TMF_CLEAR_ACA;
1190 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1191
1192 return rc;
1193}
1194
1195static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1196{
1197 struct sas_phy *phy = sas_get_local_phy(device);
1198 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1199 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1200 rc = sas_phy_reset(phy, reset_type);
1201 sas_put_local_phy(phy);
1202 msleep(2000);
1203 return rc;
1204}
1205
1206static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1207{
1208 struct hisi_sas_device *sas_dev = device->lldd_dev;
1209 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1210 unsigned long flags;
1211 int rc = TMF_RESP_FUNC_FAILED;
1212
1213 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1214 return TMF_RESP_FUNC_FAILED;
1215 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1216
d30ff263
XC
1217 hisi_sas_internal_task_abort(hisi_hba, device,
1218 HISI_SAS_INT_ABT_DEV, 0);
1219 hisi_sas_dereg_device(hisi_hba, device);
1220
0efff300
JG
1221 rc = hisi_sas_debug_I_T_nexus_reset(device);
1222
6131243a
XC
1223 if (rc == TMF_RESP_FUNC_COMPLETE) {
1224 spin_lock_irqsave(&hisi_hba->lock, flags);
1225 hisi_sas_release_task(hisi_hba, device);
1226 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1227 }
1228 return rc;
0efff300
JG
1229}
1230
1231static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1232{
0efff300
JG
1233 struct hisi_sas_device *sas_dev = device->lldd_dev;
1234 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
11b75249 1235 struct device *dev = hisi_hba->dev;
0efff300
JG
1236 unsigned long flags;
1237 int rc = TMF_RESP_FUNC_FAILED;
1238
0efff300 1239 sas_dev->dev_status = HISI_SAS_DEV_EH;
055945df
JG
1240 if (dev_is_sata(device)) {
1241 struct sas_phy *phy;
1242
1243 /* Clear internal IO and then hardreset */
1244 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1245 HISI_SAS_INT_ABT_DEV, 0);
1246 if (rc == TMF_RESP_FUNC_FAILED)
1247 goto out;
d30ff263 1248 hisi_sas_dereg_device(hisi_hba, device);
0efff300 1249
055945df
JG
1250 phy = sas_get_local_phy(device);
1251
1252 rc = sas_phy_reset(phy, 1);
1253
1254 if (rc == 0) {
1255 spin_lock_irqsave(&hisi_hba->lock, flags);
1256 hisi_sas_release_task(hisi_hba, device);
1257 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1258 }
1259 sas_put_local_phy(phy);
1260 } else {
1261 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1262
1263 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1264 if (rc == TMF_RESP_FUNC_COMPLETE) {
1265 spin_lock_irqsave(&hisi_hba->lock, flags);
1266 hisi_sas_release_task(hisi_hba, device);
1267 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1268 }
1269 }
1270out:
14d3f397 1271 if (rc != TMF_RESP_FUNC_COMPLETE)
ad604832 1272 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
14d3f397 1273 sas_dev->device_id, rc);
0efff300
JG
1274 return rc;
1275}
1276
8b05ad6a
JG
1277static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1278{
1279 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1280
1281 return hisi_sas_controller_reset(hisi_hba);
1282}
1283
0efff300
JG
1284static int hisi_sas_query_task(struct sas_task *task)
1285{
1286 struct scsi_lun lun;
1287 struct hisi_sas_tmf_task tmf_task;
1288 int rc = TMF_RESP_FUNC_FAILED;
1289
1290 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1291 struct scsi_cmnd *cmnd = task->uldd_task;
1292 struct domain_device *device = task->dev;
1293 struct hisi_sas_slot *slot = task->lldd_task;
1294 u32 tag = slot->idx;
1295
1296 int_to_scsilun(cmnd->device->lun, &lun);
1297 tmf_task.tmf = TMF_QUERY_TASK;
1298 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1299
1300 rc = hisi_sas_debug_issue_ssp_tmf(device,
1301 lun.scsi_lun,
1302 &tmf_task);
1303 switch (rc) {
1304 /* The task is still in Lun, release it then */
1305 case TMF_RESP_FUNC_SUCC:
1306 /* The task is not in Lun or failed, reset the phy */
1307 case TMF_RESP_FUNC_FAILED:
1308 case TMF_RESP_FUNC_COMPLETE:
1309 break;
997ee43c
XC
1310 default:
1311 rc = TMF_RESP_FUNC_FAILED;
1312 break;
0efff300
JG
1313 }
1314 }
1315 return rc;
1316}
1317
441c2740 1318static int
ad604832 1319hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
441c2740
JG
1320 struct sas_task *task, int abort_flag,
1321 int task_tag)
1322{
1323 struct domain_device *device = task->dev;
1324 struct hisi_sas_device *sas_dev = device->lldd_dev;
11b75249 1325 struct device *dev = hisi_hba->dev;
441c2740
JG
1326 struct hisi_sas_port *port;
1327 struct hisi_sas_slot *slot;
2e244f0f 1328 struct asd_sas_port *sas_port = device->port;
441c2740 1329 struct hisi_sas_cmd_hdr *cmd_hdr_base;
b1a49412 1330 struct hisi_sas_dq *dq = sas_dev->dq;
441c2740 1331 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
b1a49412 1332 unsigned long flags, flags_dq;
441c2740 1333
689ad4fb 1334 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
06ec0fb9
XC
1335 return -EINVAL;
1336
441c2740
JG
1337 if (!device->port)
1338 return -1;
1339
2e244f0f 1340 port = to_hisi_sas_port(sas_port);
441c2740
JG
1341
1342 /* simply get a slot and send abort command */
b1a49412 1343 spin_lock_irqsave(&hisi_hba->lock, flags);
441c2740 1344 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
b1a49412
XC
1345 if (rc) {
1346 spin_unlock_irqrestore(&hisi_hba->lock, flags);
441c2740 1347 goto err_out;
b1a49412
XC
1348 }
1349 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1350
1351 spin_lock_irqsave(&dq->lock, flags_dq);
1352 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
441c2740
JG
1353 if (rc)
1354 goto err_out_tag;
1355
b1a49412
XC
1356 dlvry_queue = dq->id;
1357 dlvry_queue_slot = dq->wr_point;
1358
441c2740
JG
1359 slot = &hisi_hba->slot_info[slot_idx];
1360 memset(slot, 0, sizeof(struct hisi_sas_slot));
1361
1362 slot->idx = slot_idx;
1363 slot->n_elem = n_elem;
1364 slot->dlvry_queue = dlvry_queue;
1365 slot->dlvry_queue_slot = dlvry_queue_slot;
1366 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1367 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1368 slot->task = task;
1369 slot->port = port;
1370 task->lldd_task = slot;
1371
1372 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1373
1374 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1375 abort_flag, task_tag);
1376 if (rc)
1377 goto err_out_tag;
1378
405314df
JG
1379
1380 list_add_tail(&slot->entry, &sas_dev->list);
54c9dd2d 1381 spin_lock_irqsave(&task->task_state_lock, flags);
441c2740 1382 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
54c9dd2d 1383 spin_unlock_irqrestore(&task->task_state_lock, flags);
441c2740 1384
b1a49412 1385 dq->slot_prep = slot;
441c2740 1386
f696cc32
JG
1387 atomic64_inc(&sas_dev->running_req);
1388
b1a49412
XC
1389 /* send abort command to the chip */
1390 hisi_hba->hw->start_delivery(dq);
1391 spin_unlock_irqrestore(&dq->lock, flags_dq);
441c2740
JG
1392
1393 return 0;
1394
1395err_out_tag:
b1a49412 1396 spin_lock_irqsave(&hisi_hba->lock, flags);
441c2740 1397 hisi_sas_slot_index_free(hisi_hba, slot_idx);
b1a49412
XC
1398 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1399 spin_unlock_irqrestore(&dq->lock, flags_dq);
441c2740
JG
1400err_out:
1401 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1402
1403 return rc;
1404}
1405
1406/**
1407 * hisi_sas_internal_task_abort -- execute an internal
1408 * abort command for single IO command or a device
1409 * @hisi_hba: host controller struct
1410 * @device: domain device
1411 * @abort_flag: mode of operation, device or single IO
1412 * @tag: tag of IO to be aborted (only relevant to single
1413 * IO mode)
1414 */
1415static int
1416hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1417 struct domain_device *device,
1418 int abort_flag, int tag)
1419{
1420 struct sas_task *task;
1421 struct hisi_sas_device *sas_dev = device->lldd_dev;
11b75249 1422 struct device *dev = hisi_hba->dev;
441c2740 1423 int res;
441c2740
JG
1424
1425 if (!hisi_hba->hw->prep_abort)
1426 return -EOPNOTSUPP;
1427
1428 task = sas_alloc_slow_task(GFP_KERNEL);
1429 if (!task)
1430 return -ENOMEM;
1431
1432 task->dev = device;
1433 task->task_proto = device->tproto;
1434 task->task_done = hisi_sas_task_done;
1435 task->slow_task->timer.data = (unsigned long)task;
1436 task->slow_task->timer.function = hisi_sas_tmf_timedout;
0844a3ff 1437 task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
441c2740
JG
1438 add_timer(&task->slow_task->timer);
1439
441c2740
JG
1440 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1441 task, abort_flag, tag);
441c2740
JG
1442 if (res) {
1443 del_timer(&task->slow_task->timer);
1444 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1445 res);
1446 goto exit;
1447 }
1448 wait_for_completion(&task->slow_task->completion);
1449 res = TMF_RESP_FUNC_FAILED;
1450
f64a6988
XC
1451 /* Internal abort timed out */
1452 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1453 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1454 struct hisi_sas_slot *slot = task->lldd_task;
1455
1456 if (slot)
1457 slot->task = NULL;
1458 dev_err(dev, "internal task abort: timeout.\n");
5b9a3e85 1459 goto exit;
f64a6988
XC
1460 }
1461 }
1462
441c2740
JG
1463 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1464 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1465 res = TMF_RESP_FUNC_COMPLETE;
1466 goto exit;
1467 }
1468
c35279f2
JG
1469 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1470 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1471 res = TMF_RESP_FUNC_SUCC;
1472 goto exit;
1473 }
1474
441c2740 1475exit:
297d7302 1476 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
441c2740
JG
1477 "resp: 0x%x sts 0x%x\n",
1478 SAS_ADDR(device->sas_addr),
1479 task,
1480 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1481 task->task_status.stat);
1482 sas_free_task(task);
1483
1484 return res;
1485}
1486
184a4635
JG
1487static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1488{
1489 hisi_sas_port_notify_formed(sas_phy);
1490}
1491
b96b97af
XT
1492static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1493 u8 reg_index, u8 reg_count, u8 *write_data)
1494{
1495 struct hisi_hba *hisi_hba = sha->lldd_ha;
1496
1497 if (!hisi_hba->hw->write_gpio)
1498 return -EOPNOTSUPP;
1499
1500 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1501 reg_index, reg_count, write_data);
1502}
1503
184a4635
JG
1504static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1505{
1506 phy->phy_attached = 0;
1507 phy->phy_type = 0;
1508 phy->port = NULL;
1509}
1510
1511void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1512{
1513 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1514 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1515 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1516
1517 if (rdy) {
1518 /* Phy down but ready */
1519 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1520 hisi_sas_port_notify_formed(sas_phy);
1521 } else {
1522 struct hisi_sas_port *port = phy->port;
1523
1524 /* Phy down and not ready */
1525 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1526 sas_phy_disconnected(sas_phy);
1527
1528 if (port) {
1529 if (phy->phy_type & PORT_TYPE_SAS) {
1530 int port_id = port->id;
1531
1532 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1533 port_id))
1534 port->port_attached = 0;
1535 } else if (phy->phy_type & PORT_TYPE_SATA)
1536 port->port_attached = 0;
1537 }
1538 hisi_sas_phy_disconnected(phy);
1539 }
1540}
1541EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1542
d3b286e6
XT
1543void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1544{
1545 int i;
1546
1547 for (i = 0; i < hisi_hba->queue_count; i++) {
1548 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1549
1550 tasklet_kill(&cq->tasklet);
1551 }
1552}
1553EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
06ec0fb9 1554
e21fe3a5
JG
1555struct scsi_transport_template *hisi_sas_stt;
1556EXPORT_SYMBOL_GPL(hisi_sas_stt);
e8899fad 1557
e21fe3a5 1558static struct scsi_host_template _hisi_sas_sht = {
7eb7869f
JG
1559 .module = THIS_MODULE,
1560 .name = DRV_NAME,
1561 .queuecommand = sas_queuecommand,
1562 .target_alloc = sas_target_alloc,
31eec8a6 1563 .slave_configure = hisi_sas_slave_configure,
701f75ec
JG
1564 .scan_finished = hisi_sas_scan_finished,
1565 .scan_start = hisi_sas_scan_start,
7eb7869f
JG
1566 .change_queue_depth = sas_change_queue_depth,
1567 .bios_param = sas_bios_param,
1568 .can_queue = 1,
1569 .this_id = -1,
1570 .sg_tablesize = SG_ALL,
1571 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1572 .use_clustering = ENABLE_CLUSTERING,
1573 .eh_device_reset_handler = sas_eh_device_reset_handler,
1574 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
1575 .target_destroy = sas_target_destroy,
1576 .ioctl = sas_ioctl,
1577};
e21fe3a5
JG
1578struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1579EXPORT_SYMBOL_GPL(hisi_sas_sht);
7eb7869f 1580
e8899fad 1581static struct sas_domain_function_template hisi_sas_transport_ops = {
abda97c2
JG
1582 .lldd_dev_found = hisi_sas_dev_found,
1583 .lldd_dev_gone = hisi_sas_dev_gone,
42e7a693 1584 .lldd_execute_task = hisi_sas_queue_command,
e4189d53 1585 .lldd_control_phy = hisi_sas_control_phy,
0efff300
JG
1586 .lldd_abort_task = hisi_sas_abort_task,
1587 .lldd_abort_task_set = hisi_sas_abort_task_set,
1588 .lldd_clear_aca = hisi_sas_clear_aca,
1589 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1590 .lldd_lu_reset = hisi_sas_lu_reset,
1591 .lldd_query_task = hisi_sas_query_task,
8b05ad6a 1592 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
184a4635 1593 .lldd_port_formed = hisi_sas_port_formed,
b96b97af 1594 .lldd_write_gpio = hisi_sas_write_gpio,
e8899fad
JG
1595};
1596
06ec0fb9
XC
1597void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1598{
1599 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1600
1601 for (i = 0; i < hisi_hba->queue_count; i++) {
1602 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1603 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1604
1605 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1606 memset(hisi_hba->cmd_hdr[i], 0, s);
1607 dq->wr_point = 0;
1608
1609 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1610 memset(hisi_hba->complete_hdr[i], 0, s);
1611 cq->rd_point = 0;
1612 }
1613
1614 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1615 memset(hisi_hba->initial_fis, 0, s);
1616
1617 s = max_command_entries * sizeof(struct hisi_sas_iost);
1618 memset(hisi_hba->iost, 0, s);
1619
1620 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1621 memset(hisi_hba->breakpoint, 0, s);
1622
1623 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1624 memset(hisi_hba->sata_breakpoint, 0, s);
1625}
1626EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1627
e21fe3a5 1628int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
6be6de18 1629{
11b75249 1630 struct device *dev = hisi_hba->dev;
a8d547bd 1631 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
6be6de18 1632
fa42d80d 1633 spin_lock_init(&hisi_hba->lock);
976867e6
JG
1634 for (i = 0; i < hisi_hba->n_phy; i++) {
1635 hisi_sas_phy_init(hisi_hba, i);
1636 hisi_hba->port[i].port_attached = 0;
1637 hisi_hba->port[i].id = -1;
976867e6
JG
1638 }
1639
af740dbe
JG
1640 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1641 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1642 hisi_hba->devices[i].device_id = i;
1643 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1644 }
1645
6be6de18 1646 for (i = 0; i < hisi_hba->queue_count; i++) {
9101a079 1647 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
4fde02ad 1648 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
9101a079
JG
1649
1650 /* Completion queue structure */
1651 cq->id = i;
1652 cq->hisi_hba = hisi_hba;
1653
4fde02ad
JG
1654 /* Delivery queue structure */
1655 dq->id = i;
1656 dq->hisi_hba = hisi_hba;
1657
6be6de18
JG
1658 /* Delivery queue */
1659 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1660 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1661 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1662 if (!hisi_hba->cmd_hdr[i])
1663 goto err_out;
6be6de18
JG
1664
1665 /* Completion queue */
1666 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1667 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1668 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1669 if (!hisi_hba->complete_hdr[i])
1670 goto err_out;
6be6de18
JG
1671 }
1672
f557e32c
XT
1673 s = sizeof(struct hisi_sas_slot_buf_table);
1674 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1675 if (!hisi_hba->buffer_pool)
6be6de18
JG
1676 goto err_out;
1677
1678 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1679 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1680 GFP_KERNEL);
1681 if (!hisi_hba->itct)
1682 goto err_out;
1683
1684 memset(hisi_hba->itct, 0, s);
1685
a8d547bd 1686 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
6be6de18
JG
1687 sizeof(struct hisi_sas_slot),
1688 GFP_KERNEL);
1689 if (!hisi_hba->slot_info)
1690 goto err_out;
1691
a8d547bd 1692 s = max_command_entries * sizeof(struct hisi_sas_iost);
6be6de18
JG
1693 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1694 GFP_KERNEL);
1695 if (!hisi_hba->iost)
1696 goto err_out;
1697
a8d547bd 1698 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
6be6de18
JG
1699 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1700 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1701 if (!hisi_hba->breakpoint)
1702 goto err_out;
1703
a8d547bd 1704 hisi_hba->slot_index_count = max_command_entries;
433f5696 1705 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
257efd1f
JG
1706 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1707 if (!hisi_hba->slot_index_tags)
1708 goto err_out;
1709
6be6de18
JG
1710 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1711 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1712 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1713 if (!hisi_hba->initial_fis)
1714 goto err_out;
6be6de18 1715
a8d547bd 1716 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
6be6de18
JG
1717 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1718 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1719 if (!hisi_hba->sata_breakpoint)
1720 goto err_out;
06ec0fb9 1721 hisi_sas_init_mem(hisi_hba);
6be6de18 1722
257efd1f
JG
1723 hisi_sas_slot_index_init(hisi_hba);
1724
7e9080e1
JG
1725 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1726 if (!hisi_hba->wq) {
1727 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1728 goto err_out;
1729 }
1730
6be6de18
JG
1731 return 0;
1732err_out:
1733 return -ENOMEM;
1734}
e21fe3a5 1735EXPORT_SYMBOL_GPL(hisi_sas_alloc);
6be6de18 1736
e21fe3a5 1737void hisi_sas_free(struct hisi_hba *hisi_hba)
89d53322 1738{
11b75249 1739 struct device *dev = hisi_hba->dev;
a8d547bd 1740 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
89d53322
JG
1741
1742 for (i = 0; i < hisi_hba->queue_count; i++) {
1743 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1744 if (hisi_hba->cmd_hdr[i])
1745 dma_free_coherent(dev, s,
1746 hisi_hba->cmd_hdr[i],
1747 hisi_hba->cmd_hdr_dma[i]);
1748
1749 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1750 if (hisi_hba->complete_hdr[i])
1751 dma_free_coherent(dev, s,
1752 hisi_hba->complete_hdr[i],
1753 hisi_hba->complete_hdr_dma[i]);
1754 }
1755
f557e32c 1756 dma_pool_destroy(hisi_hba->buffer_pool);
89d53322
JG
1757
1758 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1759 if (hisi_hba->itct)
1760 dma_free_coherent(dev, s,
1761 hisi_hba->itct, hisi_hba->itct_dma);
1762
a8d547bd 1763 s = max_command_entries * sizeof(struct hisi_sas_iost);
89d53322
JG
1764 if (hisi_hba->iost)
1765 dma_free_coherent(dev, s,
1766 hisi_hba->iost, hisi_hba->iost_dma);
1767
a8d547bd 1768 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
89d53322
JG
1769 if (hisi_hba->breakpoint)
1770 dma_free_coherent(dev, s,
1771 hisi_hba->breakpoint,
1772 hisi_hba->breakpoint_dma);
1773
1774
1775 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1776 if (hisi_hba->initial_fis)
1777 dma_free_coherent(dev, s,
1778 hisi_hba->initial_fis,
1779 hisi_hba->initial_fis_dma);
1780
a8d547bd 1781 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
89d53322
JG
1782 if (hisi_hba->sata_breakpoint)
1783 dma_free_coherent(dev, s,
1784 hisi_hba->sata_breakpoint,
1785 hisi_hba->sata_breakpoint_dma);
1786
7e9080e1
JG
1787 if (hisi_hba->wq)
1788 destroy_workqueue(hisi_hba->wq);
89d53322 1789}
e21fe3a5 1790EXPORT_SYMBOL_GPL(hisi_sas_free);
6be6de18 1791
06ec0fb9
XC
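/* Deferred full controller reset, scheduled via hisi_hba->rst_work. */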
1792static void hisi_sas_rst_work_handler(struct work_struct *work)
1793{
1794 struct hisi_hba *hisi_hba =
1795 container_of(work, struct hisi_hba, rst_work);
1796
1797 hisi_sas_controller_reset(hisi_hba);
1798}
1799
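/*
 * The controller description is read through the generic device property
 * API, so the same code serves DT- and ACPI-described devices; the syscon
 * and reset/clock register offsets are only looked up for DT platform
 * devices. A purely illustrative DT fragment (node name and values are
 * examples only, not taken from a real board):
 *
 *	sas@c1000000 {
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		phy-count = <8>;
 *		queue-count = <32>;
 *		hisilicon,sas-syscon = <&pctrl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *	};
 */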
0fa24c19 1800int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
7eb7869f 1801{
0fa24c19
JG
1802 struct device *dev = hisi_hba->dev;
1803 struct platform_device *pdev = hisi_hba->platform_dev;
1804 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
3bc45af8 1805 struct clk *refclk;
7eb7869f 1806
4d558c77 1807 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
0fa24c19
JG
1808 SAS_ADDR_SIZE)) {
1809 dev_err(dev, "could not get property sas-addr\n");
1810 return -ENOENT;
1811 }
e26b2f40 1812
4d558c77 1813 if (np) {
0fa24c19
JG
1814 /*
1815 * These properties are only required for a platform device-based
1816 * controller with DT firmware.
1817 */
4d558c77
JG
1818 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1819 "hisilicon,sas-syscon");
0fa24c19
JG
1820 if (IS_ERR(hisi_hba->ctrl)) {
1821 dev_err(dev, "could not get syscon\n");
1822 return -ENOENT;
1823 }
e26b2f40 1824
4d558c77 1825 if (device_property_read_u32(dev, "ctrl-reset-reg",
0fa24c19
JG
1826 &hisi_hba->ctrl_reset_reg)) {
1827 dev_err(dev,
1828 "could not get property ctrl-reset-reg\n");
1829 return -ENOENT;
1830 }
e26b2f40 1831
4d558c77 1832 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
0fa24c19
JG
1833 &hisi_hba->ctrl_reset_sts_reg)) {
1834 dev_err(dev,
1835 "could not get property ctrl-reset-sts-reg\n");
1836 return -ENOENT;
1837 }
e26b2f40 1838
4d558c77 1839 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
0fa24c19
JG
1840 &hisi_hba->ctrl_clock_ena_reg)) {
1841 dev_err(dev,
1842 "could not get property ctrl-clock-ena-reg\n");
1843 return -ENOENT;
1844 }
4d558c77
JG
1845 }
1846
0fa24c19 1847 refclk = devm_clk_get(dev, NULL);
3bc45af8 1848 if (IS_ERR(refclk))
87e287c1 1849 dev_dbg(dev, "no ref clk property\n");
3bc45af8
JG
1850 else
1851 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
1852
0fa24c19
JG
1853 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
1854 dev_err(dev, "could not get property phy-count\n");
1855 return -ENOENT;
1856 }
e26b2f40 1857
4d558c77 1858 if (device_property_read_u32(dev, "queue-count",
0fa24c19
JG
1859 &hisi_hba->queue_count)) {
1860 dev_err(dev, "could not get property queue-count\n");
1861 return -ENOENT;
1862 }
1863
1864 return 0;
1865}
1866EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
1867
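/*
 * Allocate the Scsi_Host with an embedded hisi_hba, read the firmware
 * description, set a 64-bit DMA mask (falling back to 32-bit), map the
 * register windows and allocate the DMA memory via hisi_sas_alloc().
 */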
1868static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1869 const struct hisi_sas_hw *hw)
1870{
1871 struct resource *res;
1872 struct Scsi_Host *shost;
1873 struct hisi_hba *hisi_hba;
1874 struct device *dev = &pdev->dev;
1875
e21fe3a5 1876 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
0fa24c19
JG
1877 if (!shost) {
1878 dev_err(dev, "scsi host alloc failed\n");
1879 return NULL;
1880 }
1881 hisi_hba = shost_priv(shost);
1882
1883 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
1884 hisi_hba->hw = hw;
1885 hisi_hba->dev = dev;
1886 hisi_hba->platform_dev = pdev;
1887 hisi_hba->shost = shost;
1888 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1889
1890 init_timer(&hisi_hba->timer);
1891
1892 if (hisi_sas_get_fw_info(hisi_hba) < 0)
e26b2f40
JG
1893 goto err_out;
1894
a6f2c7ff
JG
1895 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
1896 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
1897 dev_err(dev, "No usable DMA addressing method\n");
1898 goto err_out;
1899 }
1900
e26b2f40
JG
1901 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1902 hisi_hba->regs = devm_ioremap_resource(dev, res);
1903 if (IS_ERR(hisi_hba->regs))
1904 goto err_out;
1905
b96b97af
XT
1906 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1907 if (res) {
1908 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
1909 if (IS_ERR(hisi_hba->sgpio_regs))
1910 goto err_out;
1911 }
1912
89d53322
JG
1913 if (hisi_sas_alloc(hisi_hba, shost)) {
1914 hisi_sas_free(hisi_hba);
6be6de18 1915 goto err_out;
89d53322 1916 }
6be6de18 1917
7eb7869f
JG
1918 return shost;
1919err_out:
d37a0082 1920 kfree(shost);
7eb7869f
JG
1921 dev_err(dev, "shost alloc failed\n");
1922 return NULL;
1923}
1924
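/* Seed every PHY with the HBA's SAS address before libsas registration. */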
e21fe3a5 1925void hisi_sas_init_add(struct hisi_hba *hisi_hba)
5d74242e
JG
1926{
1927 int i;
1928
1929 for (i = 0; i < hisi_hba->n_phy; i++)
1930 memcpy(&hisi_hba->phy[i].dev_sas_addr,
1931 hisi_hba->sas_addr,
1932 SAS_ADDR_SIZE);
1933}
e21fe3a5 1934EXPORT_SYMBOL_GPL(hisi_sas_init_add);
5d74242e 1935
7eb7869f
JG
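/*
 * Common platform probe: allocate the host, populate the libsas
 * sas_ha_struct, register with the SCSI midlayer and libsas, run the
 * HW-specific init, then scan the host.
 */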
1936int hisi_sas_probe(struct platform_device *pdev,
1937 const struct hisi_sas_hw *hw)
1938{
1939 struct Scsi_Host *shost;
1940 struct hisi_hba *hisi_hba;
1941 struct device *dev = &pdev->dev;
1942 struct asd_sas_phy **arr_phy;
1943 struct asd_sas_port **arr_port;
1944 struct sas_ha_struct *sha;
1945 int rc, phy_nr, port_nr, i;
1946
1947 shost = hisi_sas_shost_alloc(pdev, hw);
d37a0082
XT
1948 if (!shost)
1949 return -ENOMEM;
7eb7869f
JG
1950
1951 sha = SHOST_TO_SAS_HA(shost);
1952 hisi_hba = shost_priv(shost);
1953 platform_set_drvdata(pdev, sha);
50cb916f 1954
7eb7869f
JG
1955 phy_nr = port_nr = hisi_hba->n_phy;
1956
1957 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1958 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
d37a0082
XT
1959 if (!arr_phy || !arr_port) {
1960 rc = -ENOMEM;
1961 goto err_out_ha;
1962 }
7eb7869f
JG
1963
1964 sha->sas_phy = arr_phy;
1965 sha->sas_port = arr_port;
7eb7869f
JG
1966 sha->lldd_ha = hisi_hba;
1967
1968 shost->transportt = hisi_sas_stt;
1969 shost->max_id = HISI_SAS_MAX_DEVICES;
1970 shost->max_lun = ~0;
1971 shost->max_channel = 1;
1972 shost->max_cmd_len = 16;
1973 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
a8d547bd
JG
1974 shost->can_queue = hisi_hba->hw->max_command_entries;
1975 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
7eb7869f
JG
1976
1977 sha->sas_ha_name = DRV_NAME;
11b75249 1978 sha->dev = hisi_hba->dev;
7eb7869f
JG
1979 sha->lldd_module = THIS_MODULE;
1980 sha->sas_addr = &hisi_hba->sas_addr[0];
1981 sha->num_phys = hisi_hba->n_phy;
1982 sha->core.shost = hisi_hba->shost;
1983
1984 for (i = 0; i < hisi_hba->n_phy; i++) {
1985 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1986 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1987 }
1988
5d74242e
JG
1989 hisi_sas_init_add(hisi_hba);
1990
7eb7869f
JG
1991 rc = scsi_add_host(shost, &pdev->dev);
1992 if (rc)
1993 goto err_out_ha;
1994
1995 rc = sas_register_ha(sha);
1996 if (rc)
1997 goto err_out_register_ha;
1998
0757f041
XC
1999 rc = hisi_hba->hw->hw_init(hisi_hba);
2000 if (rc)
2001 goto err_out_register_ha;
2002
7eb7869f
JG
2003 scsi_scan_host(shost);
2004
2005 return 0;
2006
2007err_out_register_ha:
2008 scsi_remove_host(shost);
2009err_out_ha:
d37a0082 2010 hisi_sas_free(hisi_hba);
7eb7869f
JG
2011 kfree(shost);
2012 return rc;
2013}
2014EXPORT_SYMBOL_GPL(hisi_sas_probe);
2015
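/* Tear down in reverse order of probe: libsas, SCSI host, then the DMA memory. */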
89d53322
JG
2016int hisi_sas_remove(struct platform_device *pdev)
2017{
2018 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2019 struct hisi_hba *hisi_hba = sha->lldd_ha;
d37a0082 2020 struct Scsi_Host *shost = sha->core.shost;
89d53322 2021
89d53322
JG
2022 sas_unregister_ha(sha);
2023 sas_remove_host(sha->core.shost);
2024
2025 hisi_sas_free(hisi_hba);
d37a0082 2026 kfree(shost);
89d53322
JG
2027 return 0;
2028}
2029EXPORT_SYMBOL_GPL(hisi_sas_remove);
2030
e8899fad
JG
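/*
 * Module init/exit only manage the shared libsas transport template;
 * the v1/v2 hw modules register the actual platform drivers and call
 * hisi_sas_probe()/hisi_sas_remove().
 */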
2031static __init int hisi_sas_init(void)
2032{
2033 pr_info("hisi_sas: driver version %s\n", DRV_VERSION);
2034
2035 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2036 if (!hisi_sas_stt)
2037 return -ENOMEM;
2038
2039 return 0;
2040}
2041
2042static __exit void hisi_sas_exit(void)
2043{
2044 sas_release_transport(hisi_sas_stt);
2045}
2046
2047module_init(hisi_sas_init);
2048module_exit(hisi_sas_exit);
2049
2050MODULE_VERSION(DRV_VERSION);
2051MODULE_LICENSE("GPL");
2052MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2053MODULE_DESCRIPTION("HISILICON SAS controller driver");
2054MODULE_ALIAS("platform:" DRV_NAME);