/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);

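/*
 * Map an ATA command opcode to the SATA transport protocol that the
 * hardware command header expects (FPDMA/NCQ, PIO, DMA or non-data).
 * Commands not listed fall back on the data direction: DMA_NONE means
 * the non-data protocol, anything else is treated as PIO.
 */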
u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
{
	switch (cmd) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

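/*
 * Tag allocator: a linear scan of the slot bitmap for the first free
 * index. Allocation and free are serialised by the callers, which hold
 * hisi_hba->lock around both operations.
 */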
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

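/*
 * Release everything a slot holds: DMA-unmap the scatterlist, return
 * the command/status buffer to the DMA pool and free the slot's tag.
 * Clearing and checking task->lldd_task makes it safe if two completion
 * paths race to free the same slot, avoiding a double free.
 */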
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{

	if (task) {
		struct device *dev = hisi_hba->dev;
		struct domain_device *device = task->dev;
		struct hisi_sas_device *sas_dev = device->lldd_dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter, slot->n_elem,
					     task->data_dir);

		if (sas_dev)
			atomic64_dec(&sas_dev->running_req);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				    struct hisi_sas_slot *slot,
				    int device_id, int abort_flag,
				    int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
					device_id, abort_flag, tag_to_abort);
}

/*
 * This function issues an abort TMF for the task regardless of whether
 * the task is still present in the sdev, then performs the task
 * completion cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}

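/*
 * Core command-prep path: validate the port and device, DMA-map the
 * scatterlist for non-ATA tasks, allocate a tag and a delivery-queue
 * slot, zero the command header and slot buffer, then hand off to the
 * protocol-specific prep routine. Called with dq->lock held; *pass is
 * bumped so the caller knows a slot is ready for delivery.
 */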
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
			      *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; task_done should not be
		 * called for SATA devices.
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return SAS_PHY_DOWN;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return SAS_PHY_DOWN;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d has no attached device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return SAS_PHY_DOWN;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);
	++(*pass);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_dq *dq = sas_dev->dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&dq->lock, flags);
	rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return rc;
}

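/*
 * Tell libsas that OOB completed on a phy: refresh the transport-class
 * link rates and, for SAS phys, the received identify frame, then raise
 * the PHYE_OOB_DONE and PORTE_BYTES_DMAED events.
 */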
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing to do */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba,
				     struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba->lock must be held */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "found dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

	hisi_sas_dereg_device(hisi_hba, device);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
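/*
 * Issue an internal TMF (an SSP task management function or a crafted
 * SATA FIS) as a libsas slow task, waiting up to TASK_TIMEOUT seconds
 * for completion and retrying up to TASK_RETRY times before giving up.
 */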
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.data = (unsigned long) task;
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

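/*
 * Software-reset a SATA disk: send a device-reset FIS with SRST set on
 * each link, then a second FIS with SRST cleared. On success, any tasks
 * still outstanding against the device are released.
 */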
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
		struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;
		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
		    || !device || (device->port != sas_port))
			continue;

		hisi_hba->hw->free_device(hisi_hba, sas_dev);

		/* Update linkrate of directly attached device. */
		if (!device->parent)
			device->linkrate = linkrate;

		hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & (1 << phy_no)) {
			if (do_port_check && sas_port) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;
				port->id = phy->port_id;
				hisi_sas_refresh_port_id(hisi_hba,
						sas_port, sas_phy->linkrate);

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}

	drain_workqueue(hisi_hba->shost->work_q);
}

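/*
 * Full controller reset: block further commands, soft-reset the HW,
 * complete every outstanding task as aborted, re-init the phys and
 * then rescan the topology against the old phy-state bitmap so libsas
 * sees whatever changed while the controller was down.
 */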
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_dbg(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/*
	 * Init the PHYs, then wait for them to come up and for all
	 * libsas events to finish.
	 */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	drain_workqueue(hisi_hba->wq);
	drain_workqueue(shost->work_q);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_dbg(dev, "controller reset complete\n");

out:
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}

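/*
 * libsas abort handler. The strategy depends on protocol: for SSP,
 * pair an ABORT TASK TMF with an internal abort of the slot; for
 * SATA/STP, abort all IO to the device and softreset the disk; for
 * SMP, an internal abort of the single command is enough.
 */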
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			hisi_sas_internal_task_abort(hisi_hba, device,
						     HISI_SAS_INT_ABT_DEV, 0);
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc == TMF_RESP_FUNC_FAILED)
			goto out;
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;

	return hisi_sas_controller_reset(hisi_hba);
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or it failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

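/*
 * Build and deliver an internal abort command directly to the chip:
 * take a tag and a delivery-queue slot, fill the command header via
 * the HW-specific prep_abort hook and start delivery, all without
 * going through the normal libsas execute path.
 */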
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send the abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort - execute an internal
 * abort command for a single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	if (!hisi_hba->hw->prep_abort)
		return -EOPNOTSUPP;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout.\n");
			goto exit;
		}
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

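/*
 * Allocate the per-HBA memories: DMA-coherent delivery/completion
 * queues, ITCT, IOST and breakpoint tables, a DMA pool for per-slot
 * command/status buffers, plus the slot array, tag bitmap and the
 * driver workqueue.
 */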
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

static void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}

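/*
 * Read controller properties (SAS address, reset/clock syscon
 * registers, reference clock, phy and queue counts) from DT or ACPI
 * firmware; the syscon entries are only looked up for DT platforms.
 */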
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

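/*
 * Common probe for the platform-device based HW versions: allocate the
 * Scsi_Host and libsas phy/port arrays, register with the SCSI
 * midlayer and libsas, run the HW-specific init and kick off the scan.
 */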
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);