1/*
2 * Copyright (c) 2015 Linaro Ltd.
3 * Copyright (c) 2015 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 */
11
12#include "hisi_sas.h"
13#define DRV_NAME "hisi_sas"
14
15#define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
18static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 u8 *lun, struct hisi_sas_tmf_task *tmf);
20static int
21hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 struct domain_device *device,
23 int abort_flag, int tag);
24static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25
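/*
 * Map an ATA command opcode onto the SATA frame protocol used for it
 * (FPDMA, PIO, DMA or non-data); unknown opcodes fall back on the DMA
 * direction of the request.
 */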
26u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
27{
28 switch (cmd) {
29 case ATA_CMD_FPDMA_WRITE:
30 case ATA_CMD_FPDMA_READ:
31 case ATA_CMD_FPDMA_RECV:
32 case ATA_CMD_FPDMA_SEND:
33 case ATA_CMD_NCQ_NON_DATA:
34 return HISI_SAS_SATA_PROTOCOL_FPDMA;
35
36 case ATA_CMD_DOWNLOAD_MICRO:
37 case ATA_CMD_ID_ATA:
38 case ATA_CMD_PMP_READ:
39 case ATA_CMD_READ_LOG_EXT:
40 case ATA_CMD_PIO_READ:
41 case ATA_CMD_PIO_READ_EXT:
42 case ATA_CMD_PMP_WRITE:
43 case ATA_CMD_WRITE_LOG_EXT:
44 case ATA_CMD_PIO_WRITE:
45 case ATA_CMD_PIO_WRITE_EXT:
46 return HISI_SAS_SATA_PROTOCOL_PIO;
47
48 case ATA_CMD_DSM:
49 case ATA_CMD_DOWNLOAD_MICRO_DMA:
50 case ATA_CMD_PMP_READ_DMA:
51 case ATA_CMD_PMP_WRITE_DMA:
52 case ATA_CMD_READ:
53 case ATA_CMD_READ_EXT:
54 case ATA_CMD_READ_LOG_DMA_EXT:
55 case ATA_CMD_READ_STREAM_DMA_EXT:
56 case ATA_CMD_TRUSTED_RCV_DMA:
57 case ATA_CMD_TRUSTED_SND_DMA:
58 case ATA_CMD_WRITE:
59 case ATA_CMD_WRITE_EXT:
60 case ATA_CMD_WRITE_FUA_EXT:
61 case ATA_CMD_WRITE_QUEUED:
62 case ATA_CMD_WRITE_LOG_DMA_EXT:
63 case ATA_CMD_WRITE_STREAM_DMA_EXT:
64 case ATA_CMD_ZAC_MGMT_IN:
65 return HISI_SAS_SATA_PROTOCOL_DMA;
66
67 case ATA_CMD_CHK_POWER:
68 case ATA_CMD_DEV_RESET:
69 case ATA_CMD_EDD:
70 case ATA_CMD_FLUSH:
71 case ATA_CMD_FLUSH_EXT:
72 case ATA_CMD_VERIFY:
73 case ATA_CMD_VERIFY_EXT:
74 case ATA_CMD_SET_FEATURES:
75 case ATA_CMD_STANDBY:
76 case ATA_CMD_STANDBYNOW1:
77 case ATA_CMD_ZAC_MGMT_OUT:
78 return HISI_SAS_SATA_PROTOCOL_NONDATA;
79 default:
80 if (direction == DMA_NONE)
81 return HISI_SAS_SATA_PROTOCOL_NONDATA;
82 return HISI_SAS_SATA_PROTOCOL_PIO;
83 }
84}
85EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
86
87void hisi_sas_sata_done(struct sas_task *task,
88 struct hisi_sas_slot *slot)
89{
90 struct task_status_struct *ts = &task->task_status;
91 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
92 struct hisi_sas_status_buffer *status_buf =
93 hisi_sas_status_buf_addr_mem(slot);
94 u8 *iu = &status_buf->iu[0];
95 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
96
97 resp->frame_len = sizeof(struct dev_to_host_fis);
98 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
99
100 ts->buf_valid_size = sizeof(*resp);
101}
102EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
103
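/*
 * If the task is an NCQ (FPDMA) read or write, report its queue tag and
 * return 1; otherwise return 0.
 */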
104int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
105{
106 struct ata_queued_cmd *qc = task->uldd_task;
107
108 if (qc) {
109 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
110 qc->tf.command == ATA_CMD_FPDMA_READ) {
111 *tag = qc->tag;
112 return 1;
113 }
114 }
115 return 0;
116}
117EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
118
119static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
120{
121 return device->port->ha->lldd_ha;
122}
123
124struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
125{
126 return container_of(sas_port, struct hisi_sas_port, sas_port);
127}
128EXPORT_SYMBOL_GPL(to_hisi_sas_port);
129
130static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
131{
132 void *bitmap = hisi_hba->slot_index_tags;
133
134 clear_bit(slot_idx, bitmap);
135}
136
137static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
138{
139 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
140}
141
142static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
143{
144 void *bitmap = hisi_hba->slot_index_tags;
145
146 set_bit(slot_idx, bitmap);
147}
148
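/*
 * Find the first free slot index (IPTT) in the tag bitmap and mark it used;
 * callers in this file serialise the allocation with hisi_hba->lock.
 */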
149static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
150{
151 unsigned int index;
152 void *bitmap = hisi_hba->slot_index_tags;
153
154 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
155 if (index >= hisi_hba->slot_index_count)
156 return -SAS_QUEUE_FULL;
157 hisi_sas_slot_index_set(hisi_hba, index);
158 *slot_idx = index;
159 return 0;
160}
161
162static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
163{
164 int i;
165
166 for (i = 0; i < hisi_hba->slot_index_count; ++i)
167 hisi_sas_slot_index_clear(hisi_hba, i);
168}
169
170void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
171 struct hisi_sas_slot *slot)
172{
173
174 if (task) {
175 struct device *dev = hisi_hba->dev;
176 struct domain_device *device = task->dev;
177 struct hisi_sas_device *sas_dev = device->lldd_dev;
178
179 if (!sas_protocol_ata(task->task_proto))
180 if (slot->n_elem)
181 dma_unmap_sg(dev, task->scatter, slot->n_elem,
182 task->data_dir);
183
184 task->lldd_task = NULL;
185
186 if (sas_dev)
187 atomic64_dec(&sas_dev->running_req);
188 }
189
190 if (slot->buf)
191 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
192
193
194 list_del_init(&slot->entry);
195 slot->task = NULL;
196 slot->port = NULL;
197 hisi_sas_slot_index_free(hisi_hba, slot->idx);
198
199 /* slot memory is fully zeroed when it is reused */
200}
201EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
202
203static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
204 struct hisi_sas_slot *slot)
205{
206 return hisi_hba->hw->prep_smp(hisi_hba, slot);
207}
208
209static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
210 struct hisi_sas_slot *slot, int is_tmf,
211 struct hisi_sas_tmf_task *tmf)
212{
213 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
214}
215
216static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
217 struct hisi_sas_slot *slot)
218{
219 return hisi_hba->hw->prep_stp(hisi_hba, slot);
220}
221
222static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
223 struct hisi_sas_slot *slot,
224 int device_id, int abort_flag, int tag_to_abort)
225{
226 return hisi_hba->hw->prep_abort(hisi_hba, slot,
227 device_id, abort_flag, tag_to_abort);
228}
229
230/*
231 * This function issues an abort TMF regardless of whether the task is
232 * present in the sdev or not. It then does the task completion cleanup
233 * and callbacks.
234 */
235static void hisi_sas_slot_abort(struct work_struct *work)
236{
237 struct hisi_sas_slot *abort_slot =
238 container_of(work, struct hisi_sas_slot, abort_slot);
239 struct sas_task *task = abort_slot->task;
240 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
241 struct scsi_cmnd *cmnd = task->uldd_task;
242 struct hisi_sas_tmf_task tmf_task;
243 struct scsi_lun lun;
244 struct device *dev = hisi_hba->dev;
245 int tag = abort_slot->idx;
246 unsigned long flags;
247
248 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
249 dev_err(dev, "cannot abort slot for non-ssp task\n");
250 goto out;
251 }
252
253 int_to_scsilun(cmnd->device->lun, &lun);
254 tmf_task.tmf = TMF_ABORT_TASK;
255 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
256
257 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
258out:
259 /* Do cleanup for this task */
260 spin_lock_irqsave(&hisi_hba->lock, flags);
261 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
262 spin_unlock_irqrestore(&hisi_hba->lock, flags);
263 if (task->task_done)
264 task->task_done(task);
265}
266
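/*
 * Build a slot for the task on delivery queue @dq: map the scatterlist,
 * allocate a tag and slot buffer, and fill the command header for the
 * protocol in use. Delivery itself is left to the caller, which kicks the
 * queue once for all slots counted in *pass.
 */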
267static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
268 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
269 int *pass)
270{
271 struct hisi_hba *hisi_hba = dq->hisi_hba;
272 struct domain_device *device = task->dev;
273 struct hisi_sas_device *sas_dev = device->lldd_dev;
274 struct hisi_sas_port *port;
275 struct hisi_sas_slot *slot;
276 struct hisi_sas_cmd_hdr *cmd_hdr_base;
277 struct asd_sas_port *sas_port = device->port;
278 struct device *dev = hisi_hba->dev;
279 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
280 unsigned long flags;
281
282 if (!sas_port) {
283 struct task_status_struct *ts = &task->task_status;
284
285 ts->resp = SAS_TASK_UNDELIVERED;
286 ts->stat = SAS_PHY_DOWN;
287 /*
288 * libsas will use dev->port; we should
289 * not call task_done for SATA devices
290 */
291 if (device->dev_type != SAS_SATA_DEV)
292 task->task_done(task);
293 return SAS_PHY_DOWN;
294 }
295
296 if (DEV_IS_GONE(sas_dev)) {
297 if (sas_dev)
298 dev_info(dev, "task prep: device %d not ready\n",
299 sas_dev->device_id);
300 else
301 dev_info(dev, "task prep: device %016llx not ready\n",
302 SAS_ADDR(device->sas_addr));
303
304 return SAS_PHY_DOWN;
305 }
306
307 port = to_hisi_sas_port(sas_port);
308 if (port && !port->port_attached) {
309 dev_info(dev, "task prep: %s port%d not attach device\n",
310 (dev_is_sata(device)) ?
311 "SATA/STP" : "SAS",
312 device->port->id);
313
314 return SAS_PHY_DOWN;
315 }
316
317 if (!sas_protocol_ata(task->task_proto)) {
318 if (task->num_scatter) {
319 n_elem = dma_map_sg(dev, task->scatter,
320 task->num_scatter, task->data_dir);
321 if (!n_elem) {
322 rc = -ENOMEM;
323 goto prep_out;
324 }
325 }
326 } else
327 n_elem = task->num_scatter;
328
329 spin_lock_irqsave(&hisi_hba->lock, flags);
330 if (hisi_hba->hw->slot_index_alloc)
331 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
332 device);
333 else
334 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
335 if (rc) {
336 spin_unlock_irqrestore(&hisi_hba->lock, flags);
337 goto err_out;
338 }
339 spin_unlock_irqrestore(&hisi_hba->lock, flags);
340
341 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
342 if (rc)
343 goto err_out_tag;
344
345 dlvry_queue = dq->id;
346 dlvry_queue_slot = dq->wr_point;
347 slot = &hisi_hba->slot_info[slot_idx];
348 memset(slot, 0, sizeof(struct hisi_sas_slot));
349
350 slot->idx = slot_idx;
351 slot->n_elem = n_elem;
352 slot->dlvry_queue = dlvry_queue;
353 slot->dlvry_queue_slot = dlvry_queue_slot;
354 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
355 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
356 slot->task = task;
357 slot->port = port;
358 task->lldd_task = slot;
359 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
360
361 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
362 GFP_ATOMIC, &slot->buf_dma);
363 if (!slot->buf) {
364 rc = -ENOMEM;
365 goto err_out_slot_buf;
366 }
367 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
368 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
369 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
370
371 switch (task->task_proto) {
372 case SAS_PROTOCOL_SMP:
373 rc = hisi_sas_task_prep_smp(hisi_hba, slot);
374 break;
375 case SAS_PROTOCOL_SSP:
376 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
377 break;
378 case SAS_PROTOCOL_SATA:
379 case SAS_PROTOCOL_STP:
380 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
381 rc = hisi_sas_task_prep_ata(hisi_hba, slot);
382 break;
383 default:
384 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
385 task->task_proto);
386 rc = -EINVAL;
387 break;
388 }
389
390 if (rc) {
391 dev_err(dev, "task prep: rc = 0x%x\n", rc);
392 goto err_out_buf;
393 }
394
395 list_add_tail(&slot->entry, &sas_dev->list);
396 spin_lock_irqsave(&task->task_state_lock, flags);
397 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
398 spin_unlock_irqrestore(&task->task_state_lock, flags);
399
400 dq->slot_prep = slot;
401
402 atomic64_inc(&sas_dev->running_req);
403 ++(*pass);
404
405 return 0;
406
407err_out_buf:
408 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
409 slot->buf_dma);
410err_out_slot_buf:
411 /* Nothing to be done */
412err_out_tag:
413 spin_lock_irqsave(&hisi_hba->lock, flags);
414 hisi_sas_slot_index_free(hisi_hba, slot_idx);
415 spin_unlock_irqrestore(&hisi_hba->lock, flags);
416err_out:
417 dev_err(dev, "task prep: failed[%d]!\n", rc);
418 if (!sas_protocol_ata(task->task_proto))
419 if (n_elem)
420 dma_unmap_sg(dev, task->scatter, n_elem,
421 task->data_dir);
422prep_out:
423 return rc;
424}
425
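/*
 * Queue a single task on its device's delivery queue under dq->lock and,
 * if a slot was prepared, start delivery to the hardware.
 */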
426static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
427 int is_tmf, struct hisi_sas_tmf_task *tmf)
428{
429 u32 rc;
430 u32 pass = 0;
431 unsigned long flags;
432 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
433 struct device *dev = hisi_hba->dev;
434 struct domain_device *device = task->dev;
435 struct hisi_sas_device *sas_dev = device->lldd_dev;
436 struct hisi_sas_dq *dq = sas_dev->dq;
437
438 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
439 return -EINVAL;
440
441 /* protect task_prep and start_delivery sequence */
442 spin_lock_irqsave(&dq->lock, flags);
443 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
444 if (rc)
445 dev_err(dev, "task exec: failed[%d]!\n", rc);
446
447 if (likely(pass))
448 hisi_hba->hw->start_delivery(dq);
449 spin_unlock_irqrestore(&dq->lock, flags);
450
451 return rc;
452}
453
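/*
 * A PHY has come up: tell libsas OOB is done, refresh the sas_phy link
 * rates, and hand the received identify frame (or initial FIS) up via
 * PORTE_BYTES_DMAED.
 */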
454static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
455{
456 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
457 struct asd_sas_phy *sas_phy = &phy->sas_phy;
458 struct sas_ha_struct *sas_ha;
459
460 if (!phy->phy_attached)
461 return;
462
463 sas_ha = &hisi_hba->sha;
464 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
465
466 if (sas_phy->phy) {
467 struct sas_phy *sphy = sas_phy->phy;
468
469 sphy->negotiated_linkrate = sas_phy->linkrate;
470 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
471 sphy->maximum_linkrate_hw =
472 hisi_hba->hw->phy_get_max_linkrate();
473 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
474 sphy->minimum_linkrate = phy->minimum_linkrate;
475
476 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
477 sphy->maximum_linkrate = phy->maximum_linkrate;
478 }
479
480 if (phy->phy_type & PORT_TYPE_SAS) {
481 struct sas_identify_frame *id;
482
483 id = (struct sas_identify_frame *)phy->frame_rcvd;
484 id->dev_type = phy->identify.device_type;
485 id->initiator_bits = SAS_PROTOCOL_ALL;
486 id->target_bits = phy->identify.target_port_protocols;
487 } else if (phy->phy_type & PORT_TYPE_SATA) {
488 /*Nothing*/
489 }
490
491 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
492 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
493}
494
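/*
 * Claim a free entry in hisi_hba->devices[] for the new domain device and
 * bind it to a delivery queue chosen by the entry's index; returns NULL
 * when all HISI_SAS_MAX_DEVICES entries are in use.
 */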
495static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
496{
497 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
498 struct hisi_sas_device *sas_dev = NULL;
499 int i;
500
501 spin_lock(&hisi_hba->lock);
502 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
503 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
504 int queue = i % hisi_hba->queue_count;
505 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
506
507 hisi_hba->devices[i].device_id = i;
508 sas_dev = &hisi_hba->devices[i];
509 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
510 sas_dev->dev_type = device->dev_type;
511 sas_dev->hisi_hba = hisi_hba;
512 sas_dev->sas_device = device;
513 sas_dev->dq = dq;
514 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
515 break;
516 }
517 }
518 spin_unlock(&hisi_hba->lock);
519
520 return sas_dev;
521}
522
523static int hisi_sas_dev_found(struct domain_device *device)
524{
525 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
526 struct domain_device *parent_dev = device->parent;
527 struct hisi_sas_device *sas_dev;
528 struct device *dev = hisi_hba->dev;
529
530 if (hisi_hba->hw->alloc_dev)
531 sas_dev = hisi_hba->hw->alloc_dev(device);
532 else
533 sas_dev = hisi_sas_alloc_dev(device);
534 if (!sas_dev) {
535 dev_err(dev, "fail alloc dev: max support %d devices\n",
536 HISI_SAS_MAX_DEVICES);
537 return -EINVAL;
538 }
539
540 device->lldd_dev = sas_dev;
541 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
542
543 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
544 int phy_no;
545 u8 phy_num = parent_dev->ex_dev.num_phys;
546 struct ex_phy *phy;
547
548 for (phy_no = 0; phy_no < phy_num; phy_no++) {
549 phy = &parent_dev->ex_dev.ex_phy[phy_no];
550 if (SAS_ADDR(phy->attached_sas_addr) ==
551 SAS_ADDR(device->sas_addr)) {
552 sas_dev->attached_phy = phy_no;
553 break;
554 }
555 }
556
557 if (phy_no == phy_num) {
558 dev_info(dev, "dev found: no attached "
559 "dev:%016llx at ex:%016llx\n",
560 SAS_ADDR(device->sas_addr),
561 SAS_ADDR(parent_dev->sas_addr));
562 return -EINVAL;
563 }
564 }
565
566 return 0;
567}
568
569static int hisi_sas_slave_configure(struct scsi_device *sdev)
570{
571 struct domain_device *dev = sdev_to_domain_dev(sdev);
572 int ret = sas_slave_configure(sdev);
573
574 if (ret)
575 return ret;
576 if (!dev_is_sata(dev))
577 sas_change_queue_depth(sdev, 64);
578
579 return 0;
580}
581
582static void hisi_sas_scan_start(struct Scsi_Host *shost)
583{
584 struct hisi_hba *hisi_hba = shost_priv(shost);
585
586 hisi_hba->hw->phys_init(hisi_hba);
587}
588
589static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
590{
591 struct hisi_hba *hisi_hba = shost_priv(shost);
592 struct sas_ha_struct *sha = &hisi_hba->sha;
593
594 /* Wait for PHY up interrupt to occur */
595 if (time < HZ)
596 return 0;
597
598 sas_drain_work(sha);
599 return 1;
600}
601
602static void hisi_sas_phyup_work(struct work_struct *work)
603{
604 struct hisi_sas_phy *phy =
605 container_of(work, struct hisi_sas_phy, phyup_ws);
606 struct hisi_hba *hisi_hba = phy->hisi_hba;
607 struct asd_sas_phy *sas_phy = &phy->sas_phy;
608 int phy_no = sas_phy->id;
609
610 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
611 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
612}
613
614static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
615{
616 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
617 struct asd_sas_phy *sas_phy = &phy->sas_phy;
618
619 phy->hisi_hba = hisi_hba;
620 phy->port = NULL;
621 init_timer(&phy->timer);
622 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
623 sas_phy->class = SAS;
624 sas_phy->iproto = SAS_PROTOCOL_ALL;
625 sas_phy->tproto = 0;
626 sas_phy->type = PHY_TYPE_PHYSICAL;
627 sas_phy->role = PHY_ROLE_INITIATOR;
628 sas_phy->oob_mode = OOB_NOT_CONNECTED;
629 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
630 sas_phy->id = phy_no;
631 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
632 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
633 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
634 sas_phy->lldd_phy = phy;
635
636 INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
637}
638
639static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
640{
641 struct sas_ha_struct *sas_ha = sas_phy->ha;
642 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
643 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
644 struct asd_sas_port *sas_port = sas_phy->port;
645 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
646 unsigned long flags;
647
648 if (!sas_port)
649 return;
650
651 spin_lock_irqsave(&hisi_hba->lock, flags);
652 port->port_attached = 1;
653 port->id = phy->port_id;
654 phy->port = port;
655 sas_port->lldd_port = port;
656 spin_unlock_irqrestore(&hisi_hba->lock, flags);
657}
658
659static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
660 struct hisi_sas_slot *slot)
661{
662 if (task) {
663 unsigned long flags;
664 struct task_status_struct *ts;
665
666 ts = &task->task_status;
667
668 ts->resp = SAS_TASK_COMPLETE;
669 ts->stat = SAS_ABORTED_TASK;
670 spin_lock_irqsave(&task->task_state_lock, flags);
671 task->task_state_flags &=
672 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
673 task->task_state_flags |= SAS_TASK_STATE_DONE;
674 spin_unlock_irqrestore(&task->task_state_lock, flags);
675 }
676
677 hisi_sas_slot_task_free(hisi_hba, task, slot);
678}
679
680/* hisi_hba.lock should be locked */
681static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
682 struct domain_device *device)
683{
684 struct hisi_sas_slot *slot, *slot2;
685 struct hisi_sas_device *sas_dev = device->lldd_dev;
686
687 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
688 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
689}
690
691static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
692{
693 struct hisi_sas_device *sas_dev;
694 struct domain_device *device;
695 int i;
696
697 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
698 sas_dev = &hisi_hba->devices[i];
699 device = sas_dev->sas_device;
700
701 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
702 !device)
703 continue;
704
705 hisi_sas_release_task(hisi_hba, device);
706 }
707}
708
709static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
710 struct domain_device *device)
711{
712 if (hisi_hba->hw->dereg_device)
713 hisi_hba->hw->dereg_device(hisi_hba, device);
714}
715
716static void hisi_sas_dev_gone(struct domain_device *device)
717{
718 struct hisi_sas_device *sas_dev = device->lldd_dev;
719 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
720 struct device *dev = hisi_hba->dev;
721 int dev_id = sas_dev->device_id;
722
723 dev_info(dev, "found dev[%d:%x] is gone\n",
724 sas_dev->device_id, sas_dev->dev_type);
725
726 hisi_sas_internal_task_abort(hisi_hba, device,
727 HISI_SAS_INT_ABT_DEV, 0);
728
729 hisi_sas_dereg_device(hisi_hba, device);
730
731 hisi_hba->hw->free_device(hisi_hba, sas_dev);
732 device->lldd_dev = NULL;
733 memset(sas_dev, 0, sizeof(*sas_dev));
734 sas_dev->device_id = dev_id;
735 sas_dev->dev_type = SAS_PHY_UNUSED;
736 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
737}
738
739static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
740{
741 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
742}
743
744static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
745 void *funcdata)
746{
747 struct sas_ha_struct *sas_ha = sas_phy->ha;
748 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
749 int phy_no = sas_phy->id;
750
751 switch (func) {
752 case PHY_FUNC_HARD_RESET:
753 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
754 break;
755
756 case PHY_FUNC_LINK_RESET:
757 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
758 msleep(100);
759 hisi_hba->hw->phy_start(hisi_hba, phy_no);
760 break;
761
762 case PHY_FUNC_DISABLE:
763 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
764 break;
765
766 case PHY_FUNC_SET_LINK_RATE:
767 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
768 break;
769 case PHY_FUNC_GET_EVENTS:
770 if (hisi_hba->hw->get_events) {
771 hisi_hba->hw->get_events(hisi_hba, phy_no);
772 break;
773 }
774 /* fallthru */
775 case PHY_FUNC_RELEASE_SPINUP_HOLD:
776 default:
777 return -EOPNOTSUPP;
778 }
779 return 0;
780}
781
782static void hisi_sas_task_done(struct sas_task *task)
783{
784 if (!del_timer(&task->slow_task->timer))
785 return;
786 complete(&task->slow_task->completion);
787}
788
789static void hisi_sas_tmf_timedout(unsigned long data)
790{
791 struct sas_task *task = (struct sas_task *)data;
792 unsigned long flags;
793
794 spin_lock_irqsave(&task->task_state_lock, flags);
795 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
796 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
797 spin_unlock_irqrestore(&task->task_state_lock, flags);
798
799 complete(&task->slow_task->completion);
800}
801
802#define TASK_TIMEOUT 20
803#define TASK_RETRY 3
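/*
 * Execute a task management request as an internal slow task: the buffer at
 * @parameter is copied into the FIS for SATA devices or into the SSP task
 * otherwise. Each attempt is given TASK_TIMEOUT seconds and is retried up
 * to TASK_RETRY times.
 */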
804static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
805 void *parameter, u32 para_len,
806 struct hisi_sas_tmf_task *tmf)
807{
808 struct hisi_sas_device *sas_dev = device->lldd_dev;
809 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
810 struct device *dev = hisi_hba->dev;
811 struct sas_task *task;
812 int res, retry;
813
814 for (retry = 0; retry < TASK_RETRY; retry++) {
815 task = sas_alloc_slow_task(GFP_KERNEL);
816 if (!task)
817 return -ENOMEM;
818
819 task->dev = device;
820 task->task_proto = device->tproto;
821
822 if (dev_is_sata(device)) {
823 task->ata_task.device_control_reg_update = 1;
824 memcpy(&task->ata_task.fis, parameter, para_len);
825 } else {
826 memcpy(&task->ssp_task, parameter, para_len);
827 }
828 task->task_done = hisi_sas_task_done;
829
830 task->slow_task->timer.data = (unsigned long) task;
831 task->slow_task->timer.function = hisi_sas_tmf_timedout;
832 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
833 add_timer(&task->slow_task->timer);
834
835 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
836
837 if (res) {
838 del_timer(&task->slow_task->timer);
839 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
840 res);
841 goto ex_err;
842 }
843
844 wait_for_completion(&task->slow_task->completion);
845 res = TMF_RESP_FUNC_FAILED;
846 /* Even TMF timed out, return direct. */
847 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
848 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
849 struct hisi_sas_slot *slot = task->lldd_task;
850
851 dev_err(dev, "abort tmf: TMF task timeout\n");
852 if (slot)
853 slot->task = NULL;
854
0efff300
JG
855 goto ex_err;
856 }
857 }
858
859 if (task->task_status.resp == SAS_TASK_COMPLETE &&
860 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
861 res = TMF_RESP_FUNC_COMPLETE;
862 break;
863 }
864
865 if (task->task_status.resp == SAS_TASK_COMPLETE &&
866 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
867 res = TMF_RESP_FUNC_SUCC;
868 break;
869 }
870
871 if (task->task_status.resp == SAS_TASK_COMPLETE &&
872 task->task_status.stat == SAS_DATA_UNDERRUN) {
873 /* no error, but return the number of bytes of
874 * underrun
875 */
876 dev_warn(dev, "abort tmf: task to dev %016llx "
877 "resp: 0x%x sts 0x%x underrun\n",
878 SAS_ADDR(device->sas_addr),
879 task->task_status.resp,
880 task->task_status.stat);
881 res = task->task_status.residual;
882 break;
883 }
884
885 if (task->task_status.resp == SAS_TASK_COMPLETE &&
886 task->task_status.stat == SAS_DATA_OVERRUN) {
887 dev_warn(dev, "abort tmf: blocked task error\n");
888 res = -EMSGSIZE;
889 break;
890 }
891
892 dev_warn(dev, "abort tmf: task to dev "
893 "%016llx resp: 0x%x status 0x%x\n",
894 SAS_ADDR(device->sas_addr), task->task_status.resp,
895 task->task_status.stat);
896 sas_free_task(task);
897 task = NULL;
898 }
899ex_err:
900 if (retry == TASK_RETRY)
901 dev_warn(dev, "abort tmf: executing internal task failed!\n");
902 sas_free_task(task);
903 return res;
904}
905
906static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
907 bool reset, int pmp, u8 *fis)
908{
909 struct ata_taskfile tf;
910
911 ata_tf_init(dev, &tf);
912 if (reset)
913 tf.ctl |= ATA_SRST;
914 else
915 tf.ctl &= ~ATA_SRST;
916 tf.command = ATA_CMD_DEV_RESET;
917 ata_tf_to_fis(&tf, pmp, 0, fis);
918}
919
920static int hisi_sas_softreset_ata_disk(struct domain_device *device)
921{
922 u8 fis[20] = {0};
923 struct ata_port *ap = device->sata_dev.ap;
924 struct ata_link *link;
925 int rc = TMF_RESP_FUNC_FAILED;
926 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
927 struct device *dev = hisi_hba->dev;
928 int s = sizeof(struct host_to_dev_fis);
929 unsigned long flags;
930
931 ata_for_each_link(link, ap, EDGE) {
932 int pmp = sata_srst_pmp(link);
933
934 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
935 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
936 if (rc != TMF_RESP_FUNC_COMPLETE)
937 break;
938 }
939
940 if (rc == TMF_RESP_FUNC_COMPLETE) {
941 ata_for_each_link(link, ap, EDGE) {
942 int pmp = sata_srst_pmp(link);
943
944 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
945 rc = hisi_sas_exec_internal_tmf_task(device, fis,
946 s, NULL);
947 if (rc != TMF_RESP_FUNC_COMPLETE)
948 dev_err(dev, "ata disk de-reset failed\n");
949 }
950 } else {
951 dev_err(dev, "ata disk reset failed\n");
952 }
953
954 if (rc == TMF_RESP_FUNC_COMPLETE) {
955 spin_lock_irqsave(&hisi_hba->lock, flags);
956 hisi_sas_release_task(hisi_hba, device);
957 spin_unlock_irqrestore(&hisi_hba->lock, flags);
958 }
959
960 return rc;
961}
962
963static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
964 u8 *lun, struct hisi_sas_tmf_task *tmf)
965{
966 struct sas_ssp_task ssp_task;
967
968 if (!(device->tproto & SAS_PROTOCOL_SSP))
969 return TMF_RESP_FUNC_ESUPP;
970
971 memcpy(ssp_task.LUN, lun, 8);
972
973 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
974 sizeof(ssp_task), tmf);
975}
976
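/*
 * When a port comes back after reset, re-register every device still
 * attached to it with the hardware (free and then set up its ITCT again)
 * and record the new negotiated linkrate for directly attached devices.
 */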
977static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
978 struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
979{
980 struct hisi_sas_device *sas_dev;
981 struct domain_device *device;
982 int i;
983
984 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
985 sas_dev = &hisi_hba->devices[i];
986 device = sas_dev->sas_device;
987 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
988 || !device || (device->port != sas_port))
989 continue;
990
991 hisi_hba->hw->free_device(hisi_hba, sas_dev);
992
993 /* Update linkrate of directly attached device. */
994 if (!device->parent)
995 device->linkrate = linkrate;
996
997 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
998 }
999}
1000
1001static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1002 u32 state)
1003{
1004 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1005 struct asd_sas_port *_sas_port = NULL;
1006 int phy_no;
1007
1008 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1009 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1010 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1011 struct asd_sas_port *sas_port = sas_phy->port;
1012 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1013 bool do_port_check = !!(_sas_port != sas_port);
1014
1015 if (!sas_phy->phy->enabled)
1016 continue;
1017
1018 /* Report PHY state change to libsas */
1019 if (state & (1 << phy_no)) {
1020 if (do_port_check && sas_port) {
1021 struct domain_device *dev = sas_port->port_dev;
1022
1023 _sas_port = sas_port;
1024 port->id = phy->port_id;
1025 hisi_sas_refresh_port_id(hisi_hba,
1026 sas_port, sas_phy->linkrate);
1027
1028 if (DEV_IS_EXPANDER(dev->dev_type))
1029 sas_ha->notify_port_event(sas_phy,
1030 PORTE_BROADCAST_RCVD);
1031 }
1032 } else if (old_state & (1 << phy_no))
1033 /* PHY down but was up before */
1034 hisi_sas_phy_down(hisi_hba, phy_no, 0);
1035
1036 }
1037
1038 drain_workqueue(hisi_hba->shost->work_q);
1039}
1040
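/*
 * Full controller recovery: block the host, reject new commands, run the
 * hardware soft reset, fail back every outstanding task, bring the PHYs
 * up again and rescan the topology against the pre-reset PHY state.
 */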
1041static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1042{
1043 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1044 struct device *dev = hisi_hba->dev;
1045 struct Scsi_Host *shost = hisi_hba->shost;
1046 u32 old_state, state;
1047 unsigned long flags;
1048 int rc;
1049
1050 if (!hisi_hba->hw->soft_reset)
1051 return -1;
1052
1053 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1054 return -1;
1055
1056 dev_dbg(dev, "controller resetting...\n");
1057 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1058
1059 scsi_block_requests(shost);
1060 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1061 rc = hisi_hba->hw->soft_reset(hisi_hba);
1062 if (rc) {
1063 dev_warn(dev, "controller reset failed (%d)\n", rc);
1064 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1065 goto out;
1066 }
1067 spin_lock_irqsave(&hisi_hba->lock, flags);
1068 hisi_sas_release_tasks(hisi_hba);
1069 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1070
1071 sas_ha->notify_ha_event(sas_ha, HAE_RESET);
1072 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1073
1074 /* Init and wait for PHYs to come up and all libsas event finished. */
1075 hisi_hba->hw->phys_init(hisi_hba);
1076 msleep(1000);
1077 drain_workqueue(hisi_hba->wq);
1078 drain_workqueue(shost->work_q);
1079
1080 state = hisi_hba->hw->get_phys_state(hisi_hba);
1081 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1082 dev_dbg(dev, "controller reset complete\n");
1083
1084out:
1085 scsi_unblock_requests(shost);
1086 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1087
1088 return rc;
1089}
1090
1091static int hisi_sas_abort_task(struct sas_task *task)
1092{
1093 struct scsi_lun lun;
1094 struct hisi_sas_tmf_task tmf_task;
1095 struct domain_device *device = task->dev;
1096 struct hisi_sas_device *sas_dev = device->lldd_dev;
1097 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
1098 struct device *dev = hisi_hba->dev;
1099 int rc = TMF_RESP_FUNC_FAILED;
1100 unsigned long flags;
1101
1102 if (!sas_dev) {
1103 dev_warn(dev, "Device has been removed\n");
1104 return TMF_RESP_FUNC_FAILED;
1105 }
1106
1107 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1108 rc = TMF_RESP_FUNC_COMPLETE;
1109 goto out;
1110 }
1111
1112 sas_dev->dev_status = HISI_SAS_DEV_EH;
1113 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1114 struct scsi_cmnd *cmnd = task->uldd_task;
1115 struct hisi_sas_slot *slot = task->lldd_task;
1116 u32 tag = slot->idx;
1117 int rc2;
1118
1119 int_to_scsilun(cmnd->device->lun, &lun);
1120 tmf_task.tmf = TMF_ABORT_TASK;
1121 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1122
1123 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1124 &tmf_task);
1125
1126 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1127 HISI_SAS_INT_ABT_CMD, tag);
1128 /*
1129 * If the TMF finds that the IO is not in the device and also
1130 * the internal abort does not succeed, then it is safe to
1131 * free the slot.
1132 * Note: if the internal abort succeeds then the slot
1133 * will have already been completed
1134 */
1135 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1136 if (task->lldd_task) {
1137 spin_lock_irqsave(&hisi_hba->lock, flags);
1138 hisi_sas_do_release_task(hisi_hba, task, slot);
1139 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1140 }
1141 }
1142 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1143 task->task_proto & SAS_PROTOCOL_STP) {
1144 if (task->dev->dev_type == SAS_SATA_DEV) {
1145 hisi_sas_internal_task_abort(hisi_hba, device,
1146 HISI_SAS_INT_ABT_DEV, 0);
1147 hisi_sas_dereg_device(hisi_hba, device);
1148 rc = hisi_sas_softreset_ata_disk(device);
1149 }
1150 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1151 /* SMP */
1152 struct hisi_sas_slot *slot = task->lldd_task;
1153 u32 tag = slot->idx;
1154
1155 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1156 HISI_SAS_INT_ABT_CMD, tag);
1157 if (rc == TMF_RESP_FUNC_FAILED) {
1158 spin_lock_irqsave(&hisi_hba->lock, flags);
1159 hisi_sas_do_release_task(hisi_hba, task, slot);
1160 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1161 }
1162 }
1163
1164out:
1165 if (rc != TMF_RESP_FUNC_COMPLETE)
1166 dev_notice(dev, "abort task: rc=%d\n", rc);
1167 return rc;
1168}
1169
1170static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1171{
1172 struct hisi_sas_tmf_task tmf_task;
1173 int rc = TMF_RESP_FUNC_FAILED;
1174
1175 tmf_task.tmf = TMF_ABORT_TASK_SET;
1176 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1177
1178 return rc;
1179}
1180
1181static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1182{
1183 int rc = TMF_RESP_FUNC_FAILED;
1184 struct hisi_sas_tmf_task tmf_task;
1185
1186 tmf_task.tmf = TMF_CLEAR_ACA;
1187 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1188
1189 return rc;
1190}
1191
1192static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1193{
1194 struct sas_phy *phy = sas_get_local_phy(device);
1195 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1196 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1197 rc = sas_phy_reset(phy, reset_type);
1198 sas_put_local_phy(phy);
1199 msleep(2000);
1200 return rc;
1201}
1202
1203static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1204{
1205 struct hisi_sas_device *sas_dev = device->lldd_dev;
1206 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1207 unsigned long flags;
1208 int rc = TMF_RESP_FUNC_FAILED;
1209
1210 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1211 return TMF_RESP_FUNC_FAILED;
1212 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1213
1214 hisi_sas_internal_task_abort(hisi_hba, device,
1215 HISI_SAS_INT_ABT_DEV, 0);
1216 hisi_sas_dereg_device(hisi_hba, device);
1217
1218 rc = hisi_sas_debug_I_T_nexus_reset(device);
1219
1220 if (rc == TMF_RESP_FUNC_COMPLETE) {
1221 spin_lock_irqsave(&hisi_hba->lock, flags);
1222 hisi_sas_release_task(hisi_hba, device);
1223 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1224 }
1225 return rc;
1226}
1227
1228static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1229{
1230 struct hisi_sas_device *sas_dev = device->lldd_dev;
1231 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1232 struct device *dev = hisi_hba->dev;
1233 unsigned long flags;
1234 int rc = TMF_RESP_FUNC_FAILED;
1235
1236 sas_dev->dev_status = HISI_SAS_DEV_EH;
1237 if (dev_is_sata(device)) {
1238 struct sas_phy *phy;
1239
1240 /* Clear internal IO and then hardreset */
1241 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1242 HISI_SAS_INT_ABT_DEV, 0);
1243 if (rc == TMF_RESP_FUNC_FAILED)
1244 goto out;
1245 hisi_sas_dereg_device(hisi_hba, device);
1246
1247 phy = sas_get_local_phy(device);
1248
1249 rc = sas_phy_reset(phy, 1);
1250
1251 if (rc == 0) {
1252 spin_lock_irqsave(&hisi_hba->lock, flags);
1253 hisi_sas_release_task(hisi_hba, device);
1254 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1255 }
1256 sas_put_local_phy(phy);
1257 } else {
1258 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1259
1260 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1261 if (rc == TMF_RESP_FUNC_COMPLETE) {
1262 spin_lock_irqsave(&hisi_hba->lock, flags);
1263 hisi_sas_release_task(hisi_hba, device);
1264 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1265 }
1266 }
1267out:
1268 if (rc != TMF_RESP_FUNC_COMPLETE)
1269 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1270 sas_dev->device_id, rc);
1271 return rc;
1272}
1273
1274static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1275{
1276 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1277
1278 return hisi_sas_controller_reset(hisi_hba);
1279}
1280
1281static int hisi_sas_query_task(struct sas_task *task)
1282{
1283 struct scsi_lun lun;
1284 struct hisi_sas_tmf_task tmf_task;
1285 int rc = TMF_RESP_FUNC_FAILED;
1286
1287 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1288 struct scsi_cmnd *cmnd = task->uldd_task;
1289 struct domain_device *device = task->dev;
1290 struct hisi_sas_slot *slot = task->lldd_task;
1291 u32 tag = slot->idx;
1292
1293 int_to_scsilun(cmnd->device->lun, &lun);
1294 tmf_task.tmf = TMF_QUERY_TASK;
1295 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1296
1297 rc = hisi_sas_debug_issue_ssp_tmf(device,
1298 lun.scsi_lun,
1299 &tmf_task);
1300 switch (rc) {
1301 /* The task is still in Lun, release it then */
1302 case TMF_RESP_FUNC_SUCC:
1303 /* The task is not in Lun or failed, reset the phy */
1304 case TMF_RESP_FUNC_FAILED:
1305 case TMF_RESP_FUNC_COMPLETE:
1306 break;
1307 default:
1308 rc = TMF_RESP_FUNC_FAILED;
1309 break;
1310 }
1311 }
1312 return rc;
1313}
1314
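/*
 * Deliver a hardware "internal abort" command through a normal delivery
 * queue slot, targeting either a single tag or a whole device according
 * to abort_flag.
 */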
1315static int
1316hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1317 struct sas_task *task, int abort_flag,
1318 int task_tag)
1319{
1320 struct domain_device *device = task->dev;
1321 struct hisi_sas_device *sas_dev = device->lldd_dev;
1322 struct device *dev = hisi_hba->dev;
1323 struct hisi_sas_port *port;
1324 struct hisi_sas_slot *slot;
1325 struct asd_sas_port *sas_port = device->port;
1326 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1327 struct hisi_sas_dq *dq = sas_dev->dq;
1328 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1329 unsigned long flags, flags_dq;
1330
1331 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1332 return -EINVAL;
1333
1334 if (!device->port)
1335 return -1;
1336
1337 port = to_hisi_sas_port(sas_port);
1338
1339 /* simply get a slot and send abort command */
1340 spin_lock_irqsave(&hisi_hba->lock, flags);
1341 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1342 if (rc) {
1343 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1344 goto err_out;
1345 }
1346 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1347
1348 spin_lock_irqsave(&dq->lock, flags_dq);
1349 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1350 if (rc)
1351 goto err_out_tag;
1352
1353 dlvry_queue = dq->id;
1354 dlvry_queue_slot = dq->wr_point;
1355
1356 slot = &hisi_hba->slot_info[slot_idx];
1357 memset(slot, 0, sizeof(struct hisi_sas_slot));
1358
1359 slot->idx = slot_idx;
1360 slot->n_elem = n_elem;
1361 slot->dlvry_queue = dlvry_queue;
1362 slot->dlvry_queue_slot = dlvry_queue_slot;
1363 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1364 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1365 slot->task = task;
1366 slot->port = port;
1367 task->lldd_task = slot;
1368
1369 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1370
1371 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1372 abort_flag, task_tag);
1373 if (rc)
1374 goto err_out_tag;
1375
1376
1377 list_add_tail(&slot->entry, &sas_dev->list);
1378 spin_lock_irqsave(&task->task_state_lock, flags);
1379 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1380 spin_unlock_irqrestore(&task->task_state_lock, flags);
1381
1382 dq->slot_prep = slot;
1383
1384 atomic64_inc(&sas_dev->running_req);
1385
1386 /* send abort command to the chip */
1387 hisi_hba->hw->start_delivery(dq);
1388 spin_unlock_irqrestore(&dq->lock, flags_dq);
1389
1390 return 0;
1391
1392err_out_tag:
1393 spin_lock_irqsave(&hisi_hba->lock, flags);
1394 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1395 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1396 spin_unlock_irqrestore(&dq->lock, flags_dq);
1397err_out:
1398 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1399
1400 return rc;
1401}
1402
1403/**
1404 * hisi_sas_internal_task_abort -- execute an internal
1405 * abort command for single IO command or a device
1404 * hisi_sas_internal_task_abort -- execute an internal
1405 * abort command for a single IO command or a device
1408 * @abort_flag: mode of operation, device or single IO
1409 * @tag: tag of IO to be aborted (only relevant to single
1410 * IO mode)
1411 */
1412static int
1413hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1414 struct domain_device *device,
1415 int abort_flag, int tag)
1416{
1417 struct sas_task *task;
1418 struct hisi_sas_device *sas_dev = device->lldd_dev;
1419 struct device *dev = hisi_hba->dev;
1420 int res;
1421
1422 if (!hisi_hba->hw->prep_abort)
1423 return -EOPNOTSUPP;
1424
1425 task = sas_alloc_slow_task(GFP_KERNEL);
1426 if (!task)
1427 return -ENOMEM;
1428
1429 task->dev = device;
1430 task->task_proto = device->tproto;
1431 task->task_done = hisi_sas_task_done;
1432 task->slow_task->timer.data = (unsigned long)task;
1433 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1434 task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
1435 add_timer(&task->slow_task->timer);
1436
1437 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1438 task, abort_flag, tag);
1439 if (res) {
1440 del_timer(&task->slow_task->timer);
1441 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1442 res);
1443 goto exit;
1444 }
1445 wait_for_completion(&task->slow_task->completion);
1446 res = TMF_RESP_FUNC_FAILED;
1447
1448 /* Internal abort timed out */
1449 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1450 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1451 struct hisi_sas_slot *slot = task->lldd_task;
1452
1453 if (slot)
1454 slot->task = NULL;
1455 dev_err(dev, "internal task abort: timeout.\n");
1456 goto exit;
1457 }
1458 }
1459
1460 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1461 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1462 res = TMF_RESP_FUNC_COMPLETE;
1463 goto exit;
1464 }
1465
1466 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1467 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1468 res = TMF_RESP_FUNC_SUCC;
1469 goto exit;
1470 }
1471
1472exit:
1473 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1474 "resp: 0x%x sts 0x%x\n",
1475 SAS_ADDR(device->sas_addr),
1476 task,
1477 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1478 task->task_status.stat);
1479 sas_free_task(task);
1480
1481 return res;
1482}
1483
1484static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1485{
1486 hisi_sas_port_notify_formed(sas_phy);
1487}
1488
1489static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1490{
1491 phy->phy_attached = 0;
1492 phy->phy_type = 0;
1493 phy->port = NULL;
1494}
1495
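/*
 * Handle a PHY going down. If @rdy the link came straight back up, so
 * re-report the attached device and re-form the port; otherwise notify
 * libsas of the loss of signal and detach the PHY from its port.
 */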
1496void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1497{
1498 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1499 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1500 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1501
1502 if (rdy) {
1503 /* Phy down but ready */
1504 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1505 hisi_sas_port_notify_formed(sas_phy);
1506 } else {
1507 struct hisi_sas_port *port = phy->port;
1508
1509 /* Phy down and not ready */
1510 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1511 sas_phy_disconnected(sas_phy);
1512
1513 if (port) {
1514 if (phy->phy_type & PORT_TYPE_SAS) {
1515 int port_id = port->id;
1516
1517 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1518 port_id))
1519 port->port_attached = 0;
1520 } else if (phy->phy_type & PORT_TYPE_SATA)
1521 port->port_attached = 0;
1522 }
1523 hisi_sas_phy_disconnected(phy);
1524 }
1525}
1526EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1527
1528
1529struct scsi_transport_template *hisi_sas_stt;
1530EXPORT_SYMBOL_GPL(hisi_sas_stt);
1531
1532 static struct scsi_host_template _hisi_sas_sht = {
1533 .module = THIS_MODULE,
1534 .name = DRV_NAME,
1535 .queuecommand = sas_queuecommand,
1536 .target_alloc = sas_target_alloc,
1537 .slave_configure = hisi_sas_slave_configure,
1538 .scan_finished = hisi_sas_scan_finished,
1539 .scan_start = hisi_sas_scan_start,
1540 .change_queue_depth = sas_change_queue_depth,
1541 .bios_param = sas_bios_param,
1542 .can_queue = 1,
1543 .this_id = -1,
1544 .sg_tablesize = SG_ALL,
1545 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1546 .use_clustering = ENABLE_CLUSTERING,
1547 .eh_device_reset_handler = sas_eh_device_reset_handler,
1548 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
1549 .target_destroy = sas_target_destroy,
1550 .ioctl = sas_ioctl,
1551};
1552struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1553EXPORT_SYMBOL_GPL(hisi_sas_sht);
1554
1555 static struct sas_domain_function_template hisi_sas_transport_ops = {
1556 .lldd_dev_found = hisi_sas_dev_found,
1557 .lldd_dev_gone = hisi_sas_dev_gone,
1558 .lldd_execute_task = hisi_sas_queue_command,
1559 .lldd_control_phy = hisi_sas_control_phy,
1560 .lldd_abort_task = hisi_sas_abort_task,
1561 .lldd_abort_task_set = hisi_sas_abort_task_set,
1562 .lldd_clear_aca = hisi_sas_clear_aca,
1563 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1564 .lldd_lu_reset = hisi_sas_lu_reset,
1565 .lldd_query_task = hisi_sas_query_task,
1566 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1567 .lldd_port_formed = hisi_sas_port_formed,
1568};
1569
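/*
 * Zero the DMA-coherent queue, IOST, breakpoint and initial FIS memory and
 * reset the delivery/completion queue pointers; called from hisi_sas_alloc()
 * and exported for reuse after a controller reset.
 */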
1570void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1571{
1572 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1573
1574 for (i = 0; i < hisi_hba->queue_count; i++) {
1575 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1576 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1577
1578 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1579 memset(hisi_hba->cmd_hdr[i], 0, s);
1580 dq->wr_point = 0;
1581
1582 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1583 memset(hisi_hba->complete_hdr[i], 0, s);
1584 cq->rd_point = 0;
1585 }
1586
1587 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1588 memset(hisi_hba->initial_fis, 0, s);
1589
1590 s = max_command_entries * sizeof(struct hisi_sas_iost);
1591 memset(hisi_hba->iost, 0, s);
1592
1593 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1594 memset(hisi_hba->breakpoint, 0, s);
1595
1596 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1597 memset(hisi_hba->sata_breakpoint, 0, s);
1598}
1599EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1600
1601 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1602{
1603 struct device *dev = hisi_hba->dev;
1604 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1605
1606 spin_lock_init(&hisi_hba->lock);
1607 for (i = 0; i < hisi_hba->n_phy; i++) {
1608 hisi_sas_phy_init(hisi_hba, i);
1609 hisi_hba->port[i].port_attached = 0;
1610 hisi_hba->port[i].id = -1;
1611 }
1612
1613 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1614 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1615 hisi_hba->devices[i].device_id = i;
1616 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1617 }
1618
1619 for (i = 0; i < hisi_hba->queue_count; i++) {
1620 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1621 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1622
1623 /* Completion queue structure */
1624 cq->id = i;
1625 cq->hisi_hba = hisi_hba;
1626
1627 /* Delivery queue structure */
1628 dq->id = i;
1629 dq->hisi_hba = hisi_hba;
1630
1631 /* Delivery queue */
1632 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1633 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1634 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1635 if (!hisi_hba->cmd_hdr[i])
1636 goto err_out;
1637
1638 /* Completion queue */
1639 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1640 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1641 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1642 if (!hisi_hba->complete_hdr[i])
1643 goto err_out;
1644 }
1645
1646 s = sizeof(struct hisi_sas_slot_buf_table);
1647 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1648 if (!hisi_hba->buffer_pool)
1649 goto err_out;
1650
1651 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1652 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1653 GFP_KERNEL);
1654 if (!hisi_hba->itct)
1655 goto err_out;
1656
1657 memset(hisi_hba->itct, 0, s);
1658
1659 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1660 sizeof(struct hisi_sas_slot),
1661 GFP_KERNEL);
1662 if (!hisi_hba->slot_info)
1663 goto err_out;
1664
1665 s = max_command_entries * sizeof(struct hisi_sas_iost);
1666 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1667 GFP_KERNEL);
1668 if (!hisi_hba->iost)
1669 goto err_out;
1670
1671 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1672 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1673 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1674 if (!hisi_hba->breakpoint)
1675 goto err_out;
1676
1677 hisi_hba->slot_index_count = max_command_entries;
1678 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1679 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1680 if (!hisi_hba->slot_index_tags)
1681 goto err_out;
1682
1683 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1684 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1685 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1686 if (!hisi_hba->initial_fis)
1687 goto err_out;
1688
1689 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1690 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1691 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1692 if (!hisi_hba->sata_breakpoint)
1693 goto err_out;
1694 hisi_sas_init_mem(hisi_hba);
1695
1696 hisi_sas_slot_index_init(hisi_hba);
1697
1698 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1699 if (!hisi_hba->wq) {
1700 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1701 goto err_out;
1702 }
1703
1704 return 0;
1705err_out:
1706 return -ENOMEM;
1707}
1708EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1709
1710void hisi_sas_free(struct hisi_hba *hisi_hba)
1711{
1712 struct device *dev = hisi_hba->dev;
1713 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1714
1715 for (i = 0; i < hisi_hba->queue_count; i++) {
1716 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1717 if (hisi_hba->cmd_hdr[i])
1718 dma_free_coherent(dev, s,
1719 hisi_hba->cmd_hdr[i],
1720 hisi_hba->cmd_hdr_dma[i]);
1721
1722 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1723 if (hisi_hba->complete_hdr[i])
1724 dma_free_coherent(dev, s,
1725 hisi_hba->complete_hdr[i],
1726 hisi_hba->complete_hdr_dma[i]);
1727 }
1728
1729 dma_pool_destroy(hisi_hba->buffer_pool);
1730
1731 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1732 if (hisi_hba->itct)
1733 dma_free_coherent(dev, s,
1734 hisi_hba->itct, hisi_hba->itct_dma);
1735
a8d547bd 1736 s = max_command_entries * sizeof(struct hisi_sas_iost);
89d53322
JG
1737 if (hisi_hba->iost)
1738 dma_free_coherent(dev, s,
1739 hisi_hba->iost, hisi_hba->iost_dma);
1740
a8d547bd 1741 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
89d53322
JG
1742 if (hisi_hba->breakpoint)
1743 dma_free_coherent(dev, s,
1744 hisi_hba->breakpoint,
1745 hisi_hba->breakpoint_dma);
1746
1747
1748 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1749 if (hisi_hba->initial_fis)
1750 dma_free_coherent(dev, s,
1751 hisi_hba->initial_fis,
1752 hisi_hba->initial_fis_dma);
1753
a8d547bd 1754 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
89d53322
JG
1755 if (hisi_hba->sata_breakpoint)
1756 dma_free_coherent(dev, s,
1757 hisi_hba->sata_breakpoint,
1758 hisi_hba->sata_breakpoint_dma);
1759
7e9080e1
JG
1760 if (hisi_hba->wq)
1761 destroy_workqueue(hisi_hba->wq);
89d53322 1762}
e21fe3a5 1763EXPORT_SYMBOL_GPL(hisi_sas_free);

static void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
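
/*
 * The reset work is scheduled by the HW-layer code when it detects a fatal
 * error. A minimal sketch of how such a caller would queue it on the
 * workqueue created in hisi_sas_alloc() (the calling context is illustrative,
 * not something this file mandates):
 *
 *	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 */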

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for a platform
		 * device-based controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
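
/*
 * For reference, a minimal sketch of a device-tree node carrying the
 * properties read above. The compatible string, address, register offsets
 * and counts are illustrative only; the authoritative binding lives under
 * Documentation/devicetree/bindings:
 *
 *	sas0: sas@c1000000 {
 *		compatible = "hisilicon,hip06-sas-v2";
 *		reg = <0xc1000000 0x10000>;
 *		sas-addr = [50 01 88 20 16 00 00 0a];
 *		hisilicon,sas-syscon = <&pcie_subctl>;
 *		ctrl-reset-reg = <0xa18>;
 *		ctrl-reset-sts-reg = <0x5a0c>;
 *		ctrl-clock-ena-reg = <0x318>;
 *		phy-count = <9>;
 *		queue-count = <16>;
 *	};
 */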

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA addressing, fall back to 32-bit if unsupported */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	kfree(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	kfree(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	kfree(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
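
/*
 * hisi_sas_probe()/hisi_sas_remove() are the common entry points used by the
 * HW-layer modules. A minimal sketch of how such a module wires them into its
 * platform_driver (the v2-style names and match table are illustrative, not
 * defined in this file):
 *
 *	static int hisi_sas_v2_probe(struct platform_device *pdev)
 *	{
 *		return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
 *	}
 *
 *	static int hisi_sas_v2_remove(struct platform_device *pdev)
 *	{
 *		return hisi_sas_remove(pdev);
 *	}
 *
 *	static struct platform_driver hisi_sas_v2_driver = {
 *		.probe	= hisi_sas_v2_probe,
 *		.remove	= hisi_sas_v2_remove,
 *		.driver	= {
 *			.name		= "hisi_sas_v2",
 *			.of_match_table	= sas_v2_of_match,
 *		},
 *	};
 *	module_platform_driver(hisi_sas_v2_driver);
 */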

static __init int hisi_sas_init(void)
{
	pr_info("hisi_sas: driver version %s\n", DRV_VERSION);

	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);