/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

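/*
 * Classify an ATA command by the protocol used to transfer it, so the
 * hw layer knows how to build the FIS. Commands not listed below fall
 * back on the data direction: DMA_NONE means non-data, otherwise PIO.
 */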
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
	default:
	{
		if (fis->command == ATA_CMD_SET_MAX) {
			switch (fis->features) {
			case ATA_SET_MAX_PASSWD:
			case ATA_SET_MAX_LOCK:
				return HISI_SAS_SATA_PROTOCOL_PIO;

			case ATA_SET_MAX_PASSWD_DMA:
			case ATA_SET_MAX_UNLOCK_DMA:
				return HISI_SAS_SATA_PROTOCOL_DMA;

			default:
				return HISI_SAS_SATA_PROTOCOL_NONDATA;
			}
		}
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
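/*
 * Each supported rate occupies two bits, starting from 1.5 Gbit/s at
 * bit 0; e.g. max = SAS_LINK_RATE_6_0_GBPS covers three rates and
 * yields 0x15 (bits 0, 2 and 4 set).
 */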
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u16 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

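/*
 * Tags are handed out round-robin: search starts just past the last
 * tag allocated and wraps around to 0 once, so a recently freed tag
 * is not reused immediately. Callers hold hisi_hba->lock.
 */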
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
					   0);
		if (index >= hisi_hba->slot_index_count)
			return -SAS_QUEUE_FULL;
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	hisi_hba->last_slot_index = index;

	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

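/*
 * Tear down a completed or aborted slot: unmap the scatterlist that
 * task_prep mapped (non-ATA only), return the slot buffer to the DMA
 * pool, unlink the slot from its device list and release the tag.
 */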
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot, int is_tmf,
				   struct hisi_sas_tmf_task *tmf)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag,
				     int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	if (task->task_done)
		task->task_done(task);
}

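/*
 * Build everything a command needs before delivery: DMA-map the data
 * (non-ATA), allocate a tag and a slot buffer, reserve an entry in the
 * device's delivery queue and fill in the protocol-specific command
 * header. On success *pass is bumped so the caller knows there is
 * something to deliver; the doorbell itself is rung by the caller.
 */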
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
	unsigned long flags, flags_dq;
	struct hisi_sas_dq *dq;
	int wr_q_index;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	*dq_pointer = dq = sas_dev->dq;

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attached to device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						1, DMA_TO_DEVICE);
			if (!n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						 1, DMA_FROM_DEVICE);
			if (!n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	} else
		n_elem = task->num_scatter;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (rc)
		goto err_out_dma_unmap;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	if (is_tmf)
		slot->is_internal = true;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	slot->ready = 1;

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq = NULL;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}

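/*
 * Called when OOB has completed on a phy: report the negotiated link
 * rate and the received identify frame up to libsas via PHYE_OOB_DONE
 * and PORTE_BYTES_DMAED.
 */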
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

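/*
 * Bring a newly found device to a known state: clear any stale task
 * set on SSP end devices, and issue a softreset to SATA disks,
 * retrying up to HISI_SAS_SRST_ATA_DISK_CNT times.
 */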
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba,
				     struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
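/*
 * Issue a TMF (or a softreset FIS for SATA) as an internal slow task:
 * build the task, run it under a TASK_TIMEOUT-second timer, and retry
 * up to TASK_RETRY times on failure. Returns a TMF_RESP_* code, the
 * underrun residual, or a negative errno.
 */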
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

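/*
 * After a controller reset the phy up state may have changed; walk all
 * registered devices and rewrite each port id (and ITCT) from whichever
 * of its phys came back up, or mark the port id invalid (0xff).
 */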
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
	}
}

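/*
 * Full controller recovery: block the host, reject new commands, run
 * the hw soft_reset, fail back all outstanding tasks, re-init the phys
 * and then rescan for anything that changed while the reset ran.
 */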
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	hisi_sas_release_tasks(hisi_hba);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task)
			hisi_sas_do_release_task(hisi_hba, task, slot);
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return TMF_RESP_FUNC_COMPLETE;

	return TMF_RESP_FUNC_FAILED;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

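/*
 * Deliver an internal abort command straight to the chip: allocate a
 * tag, slot buffer and delivery-queue entry for the abort, fill the
 * abort command header and ring the doorbell immediately. No data is
 * transferred, so nothing is DMA-mapped here.
 */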
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	slot->ready = 1;
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for a single IO command or a whole device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If the hw interface is not implemented, this HW doesn't support
	 * internal aborts, or doesn't need to do them. In that case return
	 * TMF_RESP_FUNC_FAILED and let the other recovery steps carry on,
	 * which rely on the internal abort having been executed and having
	 * returned a CQ entry.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

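/*
 * Handle a phy-down event. @rdy says whether the phy is still ready:
 * if so, re-report the identify data and port so libsas keeps the
 * domain alive; if not, tell libsas the signal is lost and detach the
 * phy (and the port, once no phy in it remains up).
 */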
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_port_deformed	= hisi_sas_port_deformed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

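/*
 * Allocate everything the host needs up front: per-queue command and
 * completion rings, the slot buffer DMA pool, ITCT/IOST/breakpoint
 * tables, the tag bitmap and the single-threaded event workqueue.
 * Returns -ENOMEM if any allocation fails; anything already allocated
 * is left for hisi_sas_free() to reclaim.
 */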
1858 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1859 {
1860 struct device *dev = hisi_hba->dev;
1861 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1862
1863 spin_lock_init(&hisi_hba->lock);
1864 for (i = 0; i < hisi_hba->n_phy; i++) {
1865 hisi_sas_phy_init(hisi_hba, i);
1866 hisi_hba->port[i].port_attached = 0;
1867 hisi_hba->port[i].id = -1;
1868 }
1869
1870 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1871 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1872 hisi_hba->devices[i].device_id = i;
1873 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1874 }
1875
1876 for (i = 0; i < hisi_hba->queue_count; i++) {
1877 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1878 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1879
1880 /* Completion queue structure */
1881 cq->id = i;
1882 cq->hisi_hba = hisi_hba;
1883
1884 /* Delivery queue structure */
1885 spin_lock_init(&dq->lock);
1886 INIT_LIST_HEAD(&dq->list);
1887 dq->id = i;
1888 dq->hisi_hba = hisi_hba;
1889
1890 /* Delivery queue */
1891 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1892 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1893 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1894 if (!hisi_hba->cmd_hdr[i])
1895 goto err_out;
1896
1897 /* Completion queue */
1898 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1899 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1900 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1901 if (!hisi_hba->complete_hdr[i])
1902 goto err_out;
1903 }
1904
1905 s = sizeof(struct hisi_sas_slot_buf_table);
1906 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1907 if (!hisi_hba->buffer_pool)
1908 goto err_out;
1909
1910 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1911 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1912 GFP_KERNEL);
1913 if (!hisi_hba->itct)
1914 goto err_out;
1915
1916 memset(hisi_hba->itct, 0, s);
1917
1918 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1919 sizeof(struct hisi_sas_slot),
1920 GFP_KERNEL);
1921 if (!hisi_hba->slot_info)
1922 goto err_out;
1923
1924 s = max_command_entries * sizeof(struct hisi_sas_iost);
1925 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1926 GFP_KERNEL);
1927 if (!hisi_hba->iost)
1928 goto err_out;
1929
1930 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1931 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1932 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1933 if (!hisi_hba->breakpoint)
1934 goto err_out;
1935
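/*
 * Tag bitmap for slot allocation. Note that this sizing assumes
 * max_command_entries is a multiple of BITS_PER_BYTE; DIV_ROUND_UP()
 * would be needed if that assumption ever stopped holding.
 */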
1936 hisi_hba->slot_index_count = max_command_entries;
1937 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1938 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1939 if (!hisi_hba->slot_index_tags)
1940 goto err_out;
1941
1942 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1943 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1944 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1945 if (!hisi_hba->initial_fis)
1946 goto err_out;
1947
1948 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1949 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1950 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1951 if (!hisi_hba->sata_breakpoint)
1952 goto err_out;
1953 hisi_sas_init_mem(hisi_hba);
1954
1955 hisi_sas_slot_index_init(hisi_hba);
1956
1957 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1958 if (!hisi_hba->wq) {
1959 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1960 goto err_out;
1961 }
1962
1963 return 0;
1964 err_out:
1965 return -ENOMEM;
1966 }
1967 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1968
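/**
 * hisi_sas_free() - release the memories allocated by hisi_sas_alloc()
 * @hisi_hba: host controller struct
 *
 * Every DMA-coherent area is freed only if its pointer is non-NULL,
 * so this is safe to call on a partially initialised HBA, e.g. from
 * the hisi_sas_alloc() error path. devm-managed allocations are left
 * to the device core.
 */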
1969 void hisi_sas_free(struct hisi_hba *hisi_hba)
1970 {
1971 struct device *dev = hisi_hba->dev;
1972 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1973
1974 for (i = 0; i < hisi_hba->queue_count; i++) {
1975 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1976 if (hisi_hba->cmd_hdr[i])
1977 dma_free_coherent(dev, s,
1978 hisi_hba->cmd_hdr[i],
1979 hisi_hba->cmd_hdr_dma[i]);
1980
1981 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1982 if (hisi_hba->complete_hdr[i])
1983 dma_free_coherent(dev, s,
1984 hisi_hba->complete_hdr[i],
1985 hisi_hba->complete_hdr_dma[i]);
1986 }
1987
1988 dma_pool_destroy(hisi_hba->buffer_pool);
1989
1990 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1991 if (hisi_hba->itct)
1992 dma_free_coherent(dev, s,
1993 hisi_hba->itct, hisi_hba->itct_dma);
1994
1995 s = max_command_entries * sizeof(struct hisi_sas_iost);
1996 if (hisi_hba->iost)
1997 dma_free_coherent(dev, s,
1998 hisi_hba->iost, hisi_hba->iost_dma);
1999
2000 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2001 if (hisi_hba->breakpoint)
2002 dma_free_coherent(dev, s,
2003 hisi_hba->breakpoint,
2004 hisi_hba->breakpoint_dma);
2005
2007 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2008 if (hisi_hba->initial_fis)
2009 dma_free_coherent(dev, s,
2010 hisi_hba->initial_fis,
2011 hisi_hba->initial_fis_dma);
2012
2013 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2014 if (hisi_hba->sata_breakpoint)
2015 dma_free_coherent(dev, s,
2016 hisi_hba->sata_breakpoint,
2017 hisi_hba->sata_breakpoint_dma);
2018
2019 if (hisi_hba->wq)
2020 destroy_workqueue(hisi_hba->wq);
2021 }
2022 EXPORT_SYMBOL_GPL(hisi_sas_free);
2023
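/**
 * hisi_sas_rst_work_handler() - asynchronous controller reset
 * @work: the rst_work member of the hisi_hba
 *
 * Work item wrapper around hisi_sas_controller_reset() for resets
 * that are fire-and-forget; nobody is notified of the outcome.
 */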
2024 void hisi_sas_rst_work_handler(struct work_struct *work)
2025 {
2026 struct hisi_hba *hisi_hba =
2027 container_of(work, struct hisi_hba, rst_work);
2028
2029 hisi_sas_controller_reset(hisi_hba);
2030 }
2031 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2032
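/**
 * hisi_sas_sync_rst_work_handler() - synchronous controller reset
 * @work: the work member of a hisi_sas_rst
 *
 * Like the handler above, but records success in rst->done and signals
 * the associated completion so a waiter can synchronise with the reset.
 */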
2033 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2034 {
2035 struct hisi_sas_rst *rst =
2036 container_of(work, struct hisi_sas_rst, work);
2037
2038 if (!hisi_sas_controller_reset(rst->hisi_hba))
2039 rst->done = true;
2040 complete(rst->completion);
2041 }
2042 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2043
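/**
 * hisi_sas_get_fw_info() - read controller properties from firmware
 * @hisi_hba: host controller struct
 *
 * Read the SAS address, phy count and queue count through the unified
 * device-property API, so both DT and ACPI firmware are supported; the
 * syscon and reset-register properties are fetched only for DT-based
 * platform devices. The reference clock is optional. Returns 0 on
 * success or -ENOENT if a mandatory property is missing.
 */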
2044 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2045 {
2046 struct device *dev = hisi_hba->dev;
2047 struct platform_device *pdev = hisi_hba->platform_dev;
2048 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2049 struct clk *refclk;
2050
2051 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2052 SAS_ADDR_SIZE)) {
2053 dev_err(dev, "could not get property sas-addr\n");
2054 return -ENOENT;
2055 }
2056
2057 if (np) {
2058 /*
2059 * These properties are required only for platform device-based
2060 * controllers with DT firmware.
2061 */
2062 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2063 "hisilicon,sas-syscon");
2064 if (IS_ERR(hisi_hba->ctrl)) {
2065 dev_err(dev, "could not get syscon\n");
2066 return -ENOENT;
2067 }
2068
2069 if (device_property_read_u32(dev, "ctrl-reset-reg",
2070 &hisi_hba->ctrl_reset_reg)) {
2071 dev_err(dev,
2072 "could not get property ctrl-reset-reg\n");
2073 return -ENOENT;
2074 }
2075
2076 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2077 &hisi_hba->ctrl_reset_sts_reg)) {
2078 dev_err(dev,
2079 "could not get property ctrl-reset-sts-reg\n");
2080 return -ENOENT;
2081 }
2082
2083 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2084 &hisi_hba->ctrl_clock_ena_reg)) {
2085 dev_err(dev,
2086 "could not get property ctrl-clock-ena-reg\n");
2087 return -ENOENT;
2088 }
2089 }
2090
2091 refclk = devm_clk_get(dev, NULL);
2092 if (IS_ERR(refclk))
2093 dev_dbg(dev, "no ref clk property\n");
2094 else
2095 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2096
2097 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2098 dev_err(dev, "could not get property phy-count\n");
2099 return -ENOENT;
2100 }
2101
2102 if (device_property_read_u32(dev, "queue-count",
2103 &hisi_hba->queue_count)) {
2104 dev_err(dev, "could not get property queue-count\n");
2105 return -ENOENT;
2106 }
2107
2108 return 0;
2109 }
2110 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2111
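/*
 * Allocate and minimally initialise the Scsi_Host for a platform
 * device-based controller: read the firmware properties, set a 64-bit
 * DMA mask (with a 32-bit fallback), map the register windows (the
 * SGPIO window is optional) and allocate the per-HBA memories.
 * Returns NULL on any failure, dropping the host reference it took.
 */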
2112 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2113 const struct hisi_sas_hw *hw)
2114 {
2115 struct resource *res;
2116 struct Scsi_Host *shost;
2117 struct hisi_hba *hisi_hba;
2118 struct device *dev = &pdev->dev;
2119
2120 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
2121 if (!shost) {
2122 dev_err(dev, "scsi host alloc failed\n");
2123 return NULL;
2124 }
2125 hisi_hba = shost_priv(shost);
2126
2127 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2128 hisi_hba->hw = hw;
2129 hisi_hba->dev = dev;
2130 hisi_hba->platform_dev = pdev;
2131 hisi_hba->shost = shost;
2132 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2133
2134 timer_setup(&hisi_hba->timer, NULL, 0);
2135
2136 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2137 goto err_out;
2138
2139 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2140 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2141 dev_err(dev, "No usable DMA addressing method\n");
2142 goto err_out;
2143 }
2144
2145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2146 hisi_hba->regs = devm_ioremap_resource(dev, res);
2147 if (IS_ERR(hisi_hba->regs))
2148 goto err_out;
2149
2150 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2151 if (res) {
2152 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2153 if (IS_ERR(hisi_hba->sgpio_regs))
2154 goto err_out;
2155 }
2156
2157 if (hisi_sas_alloc(hisi_hba, shost)) {
2158 hisi_sas_free(hisi_hba);
2159 goto err_out;
2160 }
2161
2162 return shost;
2163 err_out:
2164 scsi_host_put(shost);
2165 dev_err(dev, "shost alloc failed\n");
2166 return NULL;
2167 }
2168
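/**
 * hisi_sas_init_add() - seed every phy with the controller SAS address
 * @hisi_hba: host controller struct
 */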
2169 void hisi_sas_init_add(struct hisi_hba *hisi_hba)
2170 {
2171 int i;
2172
2173 for (i = 0; i < hisi_hba->n_phy; i++)
2174 memcpy(&hisi_hba->phy[i].dev_sas_addr,
2175 hisi_hba->sas_addr,
2176 SAS_ADDR_SIZE);
2177 }
2178 EXPORT_SYMBOL_GPL(hisi_sas_init_add);
2179
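/**
 * hisi_sas_probe() - common probe path for platform-device controllers
 * @pdev: platform device
 * @hw: hw-version-specific operations and constants
 *
 * Allocate the host, wire up the libsas phy/port arrays and the
 * Scsi_Host limits, register with the SCSI midlayer and libsas, then
 * run the hw layer's init and kick off a scan. Exported so the
 * version-specific platform drivers can share it.
 */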
2180 int hisi_sas_probe(struct platform_device *pdev,
2181 const struct hisi_sas_hw *hw)
2182 {
2183 struct Scsi_Host *shost;
2184 struct hisi_hba *hisi_hba;
2185 struct device *dev = &pdev->dev;
2186 struct asd_sas_phy **arr_phy;
2187 struct asd_sas_port **arr_port;
2188 struct sas_ha_struct *sha;
2189 int rc, phy_nr, port_nr, i;
2190
2191 shost = hisi_sas_shost_alloc(pdev, hw);
2192 if (!shost)
2193 return -ENOMEM;
2194
2195 sha = SHOST_TO_SAS_HA(shost);
2196 hisi_hba = shost_priv(shost);
2197 platform_set_drvdata(pdev, sha);
2198
2199 phy_nr = port_nr = hisi_hba->n_phy;
2200
2201 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2202 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2203 if (!arr_phy || !arr_port) {
2204 rc = -ENOMEM;
2205 goto err_out_ha;
2206 }
2207
2208 sha->sas_phy = arr_phy;
2209 sha->sas_port = arr_port;
2210 sha->lldd_ha = hisi_hba;
2211
2212 shost->transportt = hisi_sas_stt;
2213 shost->max_id = HISI_SAS_MAX_DEVICES;
2214 shost->max_lun = ~0;
2215 shost->max_channel = 1;
2216 shost->max_cmd_len = 16;
2217 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2218 shost->can_queue = hisi_hba->hw->max_command_entries;
2219 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2220
2221 sha->sas_ha_name = DRV_NAME;
2222 sha->dev = hisi_hba->dev;
2223 sha->lldd_module = THIS_MODULE;
2224 sha->sas_addr = &hisi_hba->sas_addr[0];
2225 sha->num_phys = hisi_hba->n_phy;
2226 sha->core.shost = hisi_hba->shost;
2227
2228 for (i = 0; i < hisi_hba->n_phy; i++) {
2229 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2230 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2231 }
2232
2233 hisi_sas_init_add(hisi_hba);
2234
2235 rc = scsi_add_host(shost, &pdev->dev);
2236 if (rc)
2237 goto err_out_ha;
2238
2239 rc = sas_register_ha(sha);
2240 if (rc)
2241 goto err_out_register_ha;
2242
2243 rc = hisi_hba->hw->hw_init(hisi_hba);
2244 if (rc)
2245 goto err_out_register_ha;
2246
2247 scsi_scan_host(shost);
2248
2249 return 0;
2250
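/*
 * FIXME: if hw_init() fails after sas_register_ha() has succeeded, the
 * error path below removes the Scsi_Host but never calls
 * sas_unregister_ha(); it is unclear whether that is intentional.
 */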
2251 err_out_register_ha:
2252 scsi_remove_host(shost);
2253 err_out_ha:
2254 hisi_sas_free(hisi_hba);
2255 scsi_host_put(shost);
2256 return rc;
2257 }
2258 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2259
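/**
 * hisi_sas_remove() - tear down a controller set up by hisi_sas_probe()
 * @pdev: platform device
 *
 * Undoes the probe in reverse order: delete the HBA timer, unregister
 * from libsas, remove the Scsi_Host, free the HBA memories and drop
 * the final host reference.
 */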
2260 int hisi_sas_remove(struct platform_device *pdev)
2261 {
2262 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2263 struct hisi_hba *hisi_hba = sha->lldd_ha;
2264 struct Scsi_Host *shost = sha->core.shost;
2265
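/*
 * del_timer() is a no-op on an inactive timer, so the timer_pending()
 * check here is only an optimisation.
 */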
2266 if (timer_pending(&hisi_hba->timer))
2267 del_timer(&hisi_hba->timer);
2268
2269 sas_unregister_ha(sha);
2270 sas_remove_host(sha->core.shost);
2271
2272 hisi_sas_free(hisi_hba);
2273 scsi_host_put(shost);
2274 return 0;
2275 }
2276 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2277
2278 static __init int hisi_sas_init(void)
2279 {
2280 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2281 if (!hisi_sas_stt)
2282 return -ENOMEM;
2283
2284 return 0;
2285 }
2286
2287 static __exit void hisi_sas_exit(void)
2288 {
2289 sas_release_transport(hisi_sas_stt);
2290 }
2291
2292 module_init(hisi_sas_init);
2293 module_exit(hisi_sas_exit);
2294
2295 MODULE_LICENSE("GPL");
2296 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2297 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2298 MODULE_ALIAS("platform:" DRV_NAME);