/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);

u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
	default:
	{
		if (fis->command == ATA_CMD_SET_MAX) {
			switch (fis->features) {
			case ATA_SET_MAX_PASSWD:
			case ATA_SET_MAX_LOCK:
				return HISI_SAS_SATA_PROTOCOL_PIO;

			case ATA_SET_MAX_PASSWD_DMA:
			case ATA_SET_MAX_UNLOCK_DMA:
				return HISI_SAS_SATA_PROTOCOL_DMA;

			default:
				return HISI_SAS_SATA_PROTOCOL_NONDATA;
			}
		}
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
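/*
 * Example (illustrative): ATA_CMD_FPDMA_READ (an NCQ read) is classified
 * above as HISI_SAS_SATA_PROTOCOL_FPDMA, while a non-NCQ ATA_CMD_READ
 * falls into the HISI_SAS_SATA_PROTOCOL_DMA group.
 */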

void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u16 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
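/*
 * Example (illustrative): for max == SAS_LINK_RATE_6_0_GBPS the loop above
 * runs for i = 0, 1, 2 and sets bits 0, 2 and 4, giving a mask of 0x15 --
 * one bit per supported rate, spaced two bit positions apart.
 */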

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}
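/*
 * Note: callers in this file serialize the tag bitmap by holding
 * hisi_hba->lock around hisi_sas_slot_index_alloc()/_free(); the
 * find-then-set sequence above is not atomic by itself.
 */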

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot, int is_tmf,
				   struct hisi_sas_tmf_task *tmf)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}

static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq *dq,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
	unsigned long flags, flags_dq;
	int wr_q_index;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; do not call
		 * task_done for SATA.
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d has no attached device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						1, DMA_TO_DEVICE);
			if (!n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						 1, DMA_FROM_DEVICE);
			if (!n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	} else
		n_elem = task->num_scatter;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (rc)
		goto err_out_dma_unmap;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	if (is_tmf)
		slot->is_internal = true;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	slot->ready = 1;

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	int rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_dq *dq = sas_dev->dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	/* deliver any slot that task_prep queued up */
	spin_lock_irqsave(&dq->lock, flags);
	if (likely(pass))
		hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
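/*
 * Example (illustrative): a hw layer's PHY-up interrupt handler defers the
 * sleeping part of link bring-up to process context with
 *	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
 * which queues hisi_sas_phyup_work() on the hisi_hba workqueue.
 */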

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
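/* the timeouts above are in seconds; they are scaled by HZ when armed */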
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
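/*
 * A SATA soft reset is a two-step sequence: transmit a device-control FIS
 * with SRST set, then another with SRST cleared, which is exactly what
 * hisi_sas_softreset_ata_disk() below does for each link.
 */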

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memset(&ssp_task, 0, sizeof(ssp_task));
	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return TMF_RESP_FUNC_COMPLETE;

	return TMF_RESP_FUNC_FAILED;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN; the caller will release it */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN, or the TMF failed; the
		 * caller will reset the phy
		 */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	slot->ready = 1;
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort - execute an internal
 * abort command for a single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If prep_abort is not implemented, this HW either does not
	 * support internal abort or does not need to do one. Return
	 * TMF_RESP_FUNC_FAILED and let the other recovery steps carry
	 * on, which assumes the internal abort has been executed and
	 * has returned on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
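/*
 * Example (illustrative), matching the callers in this file: abort one
 * command by tag with
 *	hisi_sas_internal_task_abort(hisi_hba, device,
 *				     HISI_SAS_INT_ABT_CMD, tag);
 * or abort everything queued for a device with
 *	hisi_sas_internal_task_abort(hisi_hba, device,
 *				     HISI_SAS_INT_ABT_DEV, 0);
 */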
1687
1688 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1689 {
1690 hisi_sas_port_notify_formed(sas_phy);
1691 }
1692
1693 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1694 {
1695 }
1696
1697 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1698 u8 reg_index, u8 reg_count, u8 *write_data)
1699 {
1700 struct hisi_hba *hisi_hba = sha->lldd_ha;
1701
1702 if (!hisi_hba->hw->write_gpio)
1703 return -EOPNOTSUPP;
1704
1705 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1706 reg_index, reg_count, write_data);
1707 }
1708
1709 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1710 {
1711 phy->phy_attached = 0;
1712 phy->phy_type = 0;
1713 phy->port = NULL;
1714 }
1715
1716 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1717 {
1718 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1719 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1720 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1721
1722 if (rdy) {
1723 /* Phy down but ready */
1724 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1725 hisi_sas_port_notify_formed(sas_phy);
1726 } else {
1727 struct hisi_sas_port *port = phy->port;
1728
1729 /* Phy down and not ready */
1730 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1731 sas_phy_disconnected(sas_phy);
1732
1733 if (port) {
1734 if (phy->phy_type & PORT_TYPE_SAS) {
1735 int port_id = port->id;
1736
1737 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1738 port_id))
1739 port->port_attached = 0;
1740 } else if (phy->phy_type & PORT_TYPE_SATA)
1741 port->port_attached = 0;
1742 }
1743 hisi_sas_phy_disconnected(phy);
1744 }
1745 }
1746 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1747
1748 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1749 {
1750 int i;
1751
1752 for (i = 0; i < hisi_hba->queue_count; i++) {
1753 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1754
1755 tasklet_kill(&cq->tasklet);
1756 }
1757 }
1758 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1759
1760 struct scsi_transport_template *hisi_sas_stt;
1761 EXPORT_SYMBOL_GPL(hisi_sas_stt);
1762
1763 static struct device_attribute *host_attrs[] = {
1764 &dev_attr_phy_event_threshold,
1765 NULL,
1766 };
1767
1768 static struct scsi_host_template _hisi_sas_sht = {
1769 .module = THIS_MODULE,
1770 .name = DRV_NAME,
1771 .queuecommand = sas_queuecommand,
1772 .target_alloc = sas_target_alloc,
1773 .slave_configure = hisi_sas_slave_configure,
1774 .scan_finished = hisi_sas_scan_finished,
1775 .scan_start = hisi_sas_scan_start,
1776 .change_queue_depth = sas_change_queue_depth,
1777 .bios_param = sas_bios_param,
1778 .can_queue = 1,
1779 .this_id = -1,
1780 .sg_tablesize = SG_ALL,
1781 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1782 .use_clustering = ENABLE_CLUSTERING,
1783 .eh_device_reset_handler = sas_eh_device_reset_handler,
1784 .eh_target_reset_handler = sas_eh_target_reset_handler,
1785 .target_destroy = sas_target_destroy,
1786 .ioctl = sas_ioctl,
1787 .shost_attrs = host_attrs,
1788 };
1789 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1790 EXPORT_SYMBOL_GPL(hisi_sas_sht);
1791
1792 static struct sas_domain_function_template hisi_sas_transport_ops = {
1793 .lldd_dev_found = hisi_sas_dev_found,
1794 .lldd_dev_gone = hisi_sas_dev_gone,
1795 .lldd_execute_task = hisi_sas_queue_command,
1796 .lldd_control_phy = hisi_sas_control_phy,
1797 .lldd_abort_task = hisi_sas_abort_task,
1798 .lldd_abort_task_set = hisi_sas_abort_task_set,
1799 .lldd_clear_aca = hisi_sas_clear_aca,
1800 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1801 .lldd_lu_reset = hisi_sas_lu_reset,
1802 .lldd_query_task = hisi_sas_query_task,
1803 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1804 .lldd_port_formed = hisi_sas_port_formed,
1805 .lldd_port_deformed = hisi_sas_port_deformed,
1806 .lldd_write_gpio = hisi_sas_write_gpio,
1807 };
1808
1809 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1810 {
1811 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1812
1813 for (i = 0; i < hisi_hba->queue_count; i++) {
1814 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1815 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1816
1817 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1818 memset(hisi_hba->cmd_hdr[i], 0, s);
1819 dq->wr_point = 0;
1820
1821 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1822 memset(hisi_hba->complete_hdr[i], 0, s);
1823 cq->rd_point = 0;
1824 }
1825
1826 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1827 memset(hisi_hba->initial_fis, 0, s);
1828
1829 s = max_command_entries * sizeof(struct hisi_sas_iost);
1830 memset(hisi_hba->iost, 0, s);
1831
1832 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1833 memset(hisi_hba->breakpoint, 0, s);
1834
1835 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1836 memset(hisi_hba->sata_breakpoint, 0, s);
1837 }
1838 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1839
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

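/**
 * hisi_sas_free - release everything hisi_sas_alloc() set up
 * @hisi_hba: host controller instance
 *
 * Safe to call on a partially initialised HBA: each DMA buffer and the
 * workqueue is checked before being released, and the devm-managed
 * allocations are left to the driver core.
 */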
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

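/*
 * Reset plumbing: hisi_sas_shost_alloc() wires hisi_hba->rst_work to
 * hisi_sas_rst_work_handler(), so a controller reset can be requested
 * asynchronously by queueing that work item on hisi_hba->wq.
 */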
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

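/*
 * Minimal usage sketch (illustrative, not part of this driver): running a
 * synchronous reset through hisi_sas_sync_rst_work_handler(). It relies
 * only on the struct hisi_sas_rst members the handler above dereferences
 * (hisi_hba, work, completion, done).
 */
static int example_sync_controller_reset(struct hisi_hba *hisi_hba)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct hisi_sas_rst rst = {
		.hisi_hba = hisi_hba,
		.completion = &completion,
		.done = false,
	};

	INIT_WORK(&rst.work, hisi_sas_sync_rst_work_handler);
	queue_work(hisi_hba->wq, &rst.work);

	/* The handler sets rst.done on success before completing us. */
	wait_for_completion(&completion);
	return rst.done ? 0 : -EIO;
}
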
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

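/*
 * Hypothetical DT fragment for hisi_sas_get_fw_info() above: the property
 * names are exactly the ones queried, the values purely illustrative. An
 * optional clocks phandle feeds the devm_clk_get() reference-clock lookup.
 *
 *	sas@c1000000 {
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&pctrl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <8>;
 *		queue-count = <32>;
 *	};
 */

/*
 * Allocate the Scsi_Host, bind it to a fresh hisi_hba, pull the firmware
 * configuration, set the DMA mask (64-bit with a 32-bit fallback) and map
 * the register space(s) before handing off to hisi_sas_alloc(). The second
 * (SGPIO) memory resource is optional.
 */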
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

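/**
 * hisi_sas_probe - common probe path for the platform-based hw drivers
 * @pdev: platform device provided by the hw layer
 * @hw: revision-specific operations and limits
 *
 * Allocates the host, registers it with the SCSI midlayer and libsas,
 * runs the hw-specific initialisation and kicks off scanning.
 */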
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

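/*
 * Usage sketch (hypothetical, belongs in a separate hw module): the
 * exported probe/remove pair is meant to be wrapped by a hw-layer platform
 * driver supplying its own struct hisi_sas_hw. "my_hw_ops" and the driver
 * name below are illustrative, not part of this file.
 */
static const struct hisi_sas_hw my_hw_ops = {
	/* .hw_init, .max_command_entries, .complete_hdr_size, ... */
};

static int my_sas_probe(struct platform_device *pdev)
{
	return hisi_sas_probe(pdev, &my_hw_ops);
}

static int my_sas_remove(struct platform_device *pdev)
{
	return hisi_sas_remove(pdev);
}

static struct platform_driver my_sas_driver = {
	.probe = my_sas_probe,
	.remove = my_sas_remove,
	.driver = {
		.name = "my-hisi-sas",
	},
};
/* A real hw module would register with module_platform_driver(). */
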
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

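/*
 * Module init/exit only attach and release the shared SAS transport
 * template; the actual HBAs are brought up by the hw-layer drivers
 * calling hisi_sas_probe().
 */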
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);