]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/scsi/hisi_sas/hisi_sas_main.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2 * Copyright (c) 2015 Linaro Ltd.
3 * Copyright (c) 2015 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
15 #define DEV_IS_GONE(dev) \
16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 struct domain_device *device,
23 int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25
26 u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
27 {
28 switch (cmd) {
29 case ATA_CMD_FPDMA_WRITE:
30 case ATA_CMD_FPDMA_READ:
31 case ATA_CMD_FPDMA_RECV:
32 case ATA_CMD_FPDMA_SEND:
33 case ATA_CMD_NCQ_NON_DATA:
34 return HISI_SAS_SATA_PROTOCOL_FPDMA;
35
36 case ATA_CMD_DOWNLOAD_MICRO:
37 case ATA_CMD_ID_ATA:
38 case ATA_CMD_PMP_READ:
39 case ATA_CMD_READ_LOG_EXT:
40 case ATA_CMD_PIO_READ:
41 case ATA_CMD_PIO_READ_EXT:
42 case ATA_CMD_PMP_WRITE:
43 case ATA_CMD_WRITE_LOG_EXT:
44 case ATA_CMD_PIO_WRITE:
45 case ATA_CMD_PIO_WRITE_EXT:
46 return HISI_SAS_SATA_PROTOCOL_PIO;
47
48 case ATA_CMD_DSM:
49 case ATA_CMD_DOWNLOAD_MICRO_DMA:
50 case ATA_CMD_PMP_READ_DMA:
51 case ATA_CMD_PMP_WRITE_DMA:
52 case ATA_CMD_READ:
53 case ATA_CMD_READ_EXT:
54 case ATA_CMD_READ_LOG_DMA_EXT:
55 case ATA_CMD_READ_STREAM_DMA_EXT:
56 case ATA_CMD_TRUSTED_RCV_DMA:
57 case ATA_CMD_TRUSTED_SND_DMA:
58 case ATA_CMD_WRITE:
59 case ATA_CMD_WRITE_EXT:
60 case ATA_CMD_WRITE_FUA_EXT:
61 case ATA_CMD_WRITE_QUEUED:
62 case ATA_CMD_WRITE_LOG_DMA_EXT:
63 case ATA_CMD_WRITE_STREAM_DMA_EXT:
64 case ATA_CMD_ZAC_MGMT_IN:
65 return HISI_SAS_SATA_PROTOCOL_DMA;
66
67 case ATA_CMD_CHK_POWER:
68 case ATA_CMD_DEV_RESET:
69 case ATA_CMD_EDD:
70 case ATA_CMD_FLUSH:
71 case ATA_CMD_FLUSH_EXT:
72 case ATA_CMD_VERIFY:
73 case ATA_CMD_VERIFY_EXT:
74 case ATA_CMD_SET_FEATURES:
75 case ATA_CMD_STANDBY:
76 case ATA_CMD_STANDBYNOW1:
77 case ATA_CMD_ZAC_MGMT_OUT:
78 return HISI_SAS_SATA_PROTOCOL_NONDATA;
79 default:
80 if (direction == DMA_NONE)
81 return HISI_SAS_SATA_PROTOCOL_NONDATA;
82 return HISI_SAS_SATA_PROTOCOL_PIO;
83 }
84 }
85 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
86
87 void hisi_sas_sata_done(struct sas_task *task,
88 struct hisi_sas_slot *slot)
89 {
90 struct task_status_struct *ts = &task->task_status;
91 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
92 struct hisi_sas_status_buffer *status_buf =
93 hisi_sas_status_buf_addr_mem(slot);
94 u8 *iu = &status_buf->iu[0];
95 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
96
97 resp->frame_len = sizeof(struct dev_to_host_fis);
98 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
99
100 ts->buf_valid_size = sizeof(*resp);
101 }
102 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
103
104 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
105 {
106 struct ata_queued_cmd *qc = task->uldd_task;
107
108 if (qc) {
109 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
110 qc->tf.command == ATA_CMD_FPDMA_READ) {
111 *tag = qc->tag;
112 return 1;
113 }
114 }
115 return 0;
116 }
117 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
118
/* Return the HBA a libsas domain device hangs off (via its port's HA). */
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}
123
/* Convert a libsas port to the driver port structure embedding it. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
129
130 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
131 {
132 int phy_no;
133
134 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
135 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
136 }
137 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
138
/* Mark slot tag @slot_idx free in the IPTT allocation bitmap. */
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}
145
/* Release a slot tag back to the pool (thin wrapper over _clear). */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
150
/* Mark slot tag @slot_idx in use in the IPTT allocation bitmap. */
static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}
157
158 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
159 {
160 unsigned int index;
161 void *bitmap = hisi_hba->slot_index_tags;
162
163 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
164 if (index >= hisi_hba->slot_index_count)
165 return -SAS_QUEUE_FULL;
166 hisi_sas_slot_index_set(hisi_hba, index);
167 *slot_idx = index;
168 return 0;
169 }
170
171 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
172 {
173 int i;
174
175 for (i = 0; i < hisi_hba->slot_index_count; ++i)
176 hisi_sas_slot_index_clear(hisi_hba, i);
177 }
178
/*
 * Tear down a slot after its command completed or was aborted: undo the
 * scatter-gather DMA mapping, drop the per-device in-flight count,
 * return the command buffer to the DMA pool and free the slot tag.
 * Callers take hisi_hba->lock around this (see hisi_sas_slot_abort()).
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{

	if (task) {
		struct device *dev = hisi_hba->dev;
		struct domain_device *device = task->dev;
		struct hisi_sas_device *sas_dev = device->lldd_dev;

		/* Already freed by another path - nothing left to do */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		/* ATA tasks are not mapped here, so only unmap non-ATA */
		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter, slot->n_elem,
					     task->data_dir);

		if (sas_dev)
			atomic64_dec(&sas_dev->running_req);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	/* Unlink from the owning device's slot list and recycle the tag */
	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
214
/* Hand an SMP task's slot to the hw-specific command builder. */
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}
220
/* Hand an SSP task's slot to the hw-specific command builder. */
static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
227
/* Hand a SATA/STP task's slot to the hw-specific command builder. */
static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}
233
/* Build an internal abort command for @device_id into @slot. */
static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
241
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	/* ABORT TASK is an SSP TMF; other protocols cannot use this path */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	/* Complete the task back to the upper layer, if it set a callback */
	if (task->task_done)
		task->task_done(task);
}
278
/*
 * Prepare a libsas task for delivery on delivery queue @dq: map the
 * scatter list, allocate a slot tag and delivery-queue entry, build the
 * protocol-specific command, and queue the slot on the device's list.
 * On success *pass is incremented so the caller knows to ring the
 * delivery doorbell.  Returns 0, SAS_PHY_DOWN, or a negative errno.
 * Called under dq->lock (see hisi_sas_task_exec()).
 */
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
		*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
		int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr	*cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return SAS_PHY_DOWN;
	}

	/* Refuse work for unused/removed devices */
	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return SAS_PHY_DOWN;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return SAS_PHY_DOWN;
	}

	/* ATA tasks arrive pre-mapped; map the SG list for everything else */
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	/* Allocate a slot tag (IPTT) under the HBA lock */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* Reserve the next delivery-queue entry */
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	/* Command table + status buffer for this slot, from the DMA pool */
	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	/* Build the protocol-specific command header/table */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);
	/* Tell the caller there is a prepared slot to deliver */
	++(*pass);

	return 0;

	/* Unwind in reverse order of acquisition */
err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
439
440 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
441 int is_tmf, struct hisi_sas_tmf_task *tmf)
442 {
443 u32 rc;
444 u32 pass = 0;
445 unsigned long flags;
446 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
447 struct device *dev = hisi_hba->dev;
448 struct domain_device *device = task->dev;
449 struct hisi_sas_device *sas_dev = device->lldd_dev;
450 struct hisi_sas_dq *dq = sas_dev->dq;
451
452 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
453 return -EINVAL;
454
455 /* protect task_prep and start_delivery sequence */
456 spin_lock_irqsave(&dq->lock, flags);
457 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
458 if (rc)
459 dev_err(dev, "task exec: failed[%d]!\n", rc);
460
461 if (likely(pass))
462 hisi_hba->hw->start_delivery(dq);
463 spin_unlock_irqrestore(&dq->lock, flags);
464
465 return rc;
466 }
467
/*
 * Notify libsas that a PHY has come up: report OOB completion, sync the
 * sysfs sas_phy link rates, fill in the received identify frame (SAS
 * only) and raise PORTE_BYTES_DMAED so libsas forms/updates the port.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		/* Preserve any user-configured min/max link rates */
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* SATA: frame_rcvd already holds the D2H FIS; nothing to do */
		/*Nothing*/
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
508
/*
 * Claim the first unused entry in hisi_hba->devices[] for @device and
 * bind it to a delivery queue (devices are spread round-robin across
 * queues).  Returns the new hisi_sas_device, or NULL if the table is
 * full.  Serialised by hisi_hba->lock.
 */
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
537
/*
 * libsas .lldd_dev_found callback: allocate driver state for a newly
 * discovered device, program its ITCT entry, and for expander-attached
 * devices record which expander phy it sits behind.  Returns 0 or
 * -EINVAL on allocation failure / missing expander phy.
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	/* Some hw variants provide their own device allocator */
	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		/* Find which expander phy leads to this device */
		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}
583
/*
 * SCSI host .slave_configure: run the libsas configuration first, then
 * cap the queue depth to 64 for SAS devices (SATA keeps the depth that
 * libsas/libata chose).
 */
static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int rc = sas_slave_configure(sdev);

	if (rc)
		return rc;

	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
596
/* SCSI host .scan_start: kick off the PHYs so discovery can begin. */
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
603
/*
 * SCSI host .scan_finished: give the PHYs up to one second (HZ jiffies)
 * to come up, then drain pending libsas discovery work and declare the
 * scan done.
 */
static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
616
/*
 * Deferred PHY-up handler: runs in process context because sl_notify()
 * may sleep; then reports the link-up to libsas.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
628
/*
 * One-time initialisation of a driver PHY and its embedded libsas
 * asd_sas_phy (identity, roles, link state, deferred phy-up work).
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}
652
653 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
654 {
655 struct sas_ha_struct *sas_ha = sas_phy->ha;
656 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
657 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
658 struct asd_sas_port *sas_port = sas_phy->port;
659 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
660 unsigned long flags;
661
662 if (!sas_port)
663 return;
664
665 spin_lock_irqsave(&hisi_hba->lock, flags);
666 port->port_attached = 1;
667 port->id = phy->port_id;
668 phy->port = port;
669 sas_port->lldd_port = port;
670 spin_unlock_irqrestore(&hisi_hba->lock, flags);
671 }
672
/*
 * Complete one outstanding task as aborted (resp/stat set for libsas,
 * state flags moved to DONE) and free its slot.
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
693
/* hisi_hba.lock should be locked */
/* Abort-complete every slot still queued on @device's slot list. */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* _safe variant: hisi_sas_do_release_task() unlinks each slot */
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
704
705 static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
706 {
707 struct hisi_sas_device *sas_dev;
708 struct domain_device *device;
709 int i;
710
711 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
712 sas_dev = &hisi_hba->devices[i];
713 device = sas_dev->sas_device;
714
715 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
716 !device)
717 continue;
718
719 hisi_sas_release_task(hisi_hba, device);
720 }
721 }
722
/* Deregister @device from the hardware, if this hw variant supports it. */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
729
/*
 * libsas .lldd_dev_gone callback: abort everything in flight for the
 * device, deregister it from hardware, and recycle its table entry
 * (zeroed, dev_type back to SAS_PHY_UNUSED).
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "found dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

	hisi_sas_dereg_device(hisi_hba, device);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	/* Zero first, then restore the field that marks the entry free */
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
749
/* libsas .lldd_execute_task entry point: a normal (non-TMF) command. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
754
/*
 * libsas .lldd_control_phy: dispatch PHY management requests to the
 * hw-specific operations.  Returns 0 on success or -EOPNOTSUPP for
 * functions this hardware cannot perform.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		/* Link reset = disable, settle, then re-enable the PHY */
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
792
/*
 * Completion callback for internal (slow) tasks.  If del_timer() fails
 * the timeout handler already fired and owns the completion, so do not
 * complete a second time.
 */
static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}
799
/*
 * Timer handler for internal TMF tasks: mark the task aborted (unless
 * it already completed) and wake the waiter in
 * hisi_sas_exec_internal_tmf_task().
 */
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
813
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
/*
 * Build a slow task carrying a TMF IU (SSP) or a reset FIS (SATA),
 * execute it with a TASK_TIMEOUT-second timer, and interpret the
 * response.  Retries up to TASK_RETRY times on unrecognised responses.
 * Returns a TMF_RESP_FUNC_* code, the underrun residual, or a negative
 * errno.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		/* SATA gets a FIS; SSP gets the TMF IU */
		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout\n");
				/*
				 * Detach the task from the slot so a late
				 * hw completion won't touch a freed task.
				 */
				if (slot)
					slot->task = NULL;

				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Unrecognised response: free this attempt and retry */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
916
/*
 * Build a DEV_RESET H2D FIS with SRST asserted (@reset true) or
 * de-asserted, for the given port-multiplier port.
 */
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
930
/*
 * Perform an ATA software reset on every link of the device's port:
 * first assert SRST on each link, then de-assert it, and finally
 * abort-complete any I/O still queued for the device.  Returns a
 * TMF_RESP_FUNC_* code.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	/* Phase 1: assert SRST on every link */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: de-assert SRST only if phase 1 fully succeeded */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
973
974 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
975 u8 *lun, struct hisi_sas_tmf_task *tmf)
976 {
977 struct sas_ssp_task ssp_task;
978
979 if (!(device->tproto & SAS_PROTOCOL_SSP))
980 return TMF_RESP_FUNC_ESUPP;
981
982 memcpy(ssp_task.LUN, lun, 8);
983
984 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
985 sizeof(ssp_task), tmf);
986 }
987
/*
 * After a port comes back, re-program the ITCT for every device on
 * @sas_port (free then set up again) and refresh the link rate of
 * directly attached devices.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
		struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;
		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || (device->port != sas_port))
			continue;

		hisi_hba->hw->free_device(hisi_hba, sas_dev);

		/* Update linkrate of directly attached device. */
		if (!device->parent)
			device->linkrate = linkrate;

		hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
	}
}
1011
/*
 * Compare per-phy link state before and after a controller reset and
 * resynchronise: refresh ports whose phys are still up (once per port)
 * and report phys that went down.  Finally drain the host workqueue so
 * all resulting libsas events are processed before returning.
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/*
		 * NOTE(review): to_hisi_sas_port() is applied before the
		 * sas_port NULL check below - presumably safe only because
		 * port is dereferenced solely inside the sas_port branch;
		 * confirm.
		 */
		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
		/* True for the first phy we see on each port */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & (1 << phy_no)) {
			if (do_port_check && sas_port) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;
				port->id = phy->port_id;
				hisi_sas_refresh_port_id(hisi_hba,
						sas_port, sas_phy->linkrate);

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}

	drain_workqueue(hisi_hba->shost->work_q);
}
1051
/*
 * Full controller reset: block new commands, soft-reset the hardware,
 * abort-complete all outstanding I/O, restart the PHYs and resync the
 * topology with libsas.  Returns 0 on success, negative on failure, or
 * -1 if a reset is unsupported/already in progress.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one reset at a time */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_dbg(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	drain_workqueue(hisi_hba->wq);
	drain_workqueue(shost->work_q);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_dbg(dev, "controller reset complete\n");

out:
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}
1099
1100 static int hisi_sas_abort_task(struct sas_task *task)
1101 {
1102 struct scsi_lun lun;
1103 struct hisi_sas_tmf_task tmf_task;
1104 struct domain_device *device = task->dev;
1105 struct hisi_sas_device *sas_dev = device->lldd_dev;
1106 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
1107 struct device *dev = hisi_hba->dev;
1108 int rc = TMF_RESP_FUNC_FAILED;
1109 unsigned long flags;
1110
1111 if (!sas_dev) {
1112 dev_warn(dev, "Device has been removed\n");
1113 return TMF_RESP_FUNC_FAILED;
1114 }
1115
1116 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1117 rc = TMF_RESP_FUNC_COMPLETE;
1118 goto out;
1119 }
1120
1121 sas_dev->dev_status = HISI_SAS_DEV_EH;
1122 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1123 struct scsi_cmnd *cmnd = task->uldd_task;
1124 struct hisi_sas_slot *slot = task->lldd_task;
1125 u32 tag = slot->idx;
1126 int rc2;
1127
1128 int_to_scsilun(cmnd->device->lun, &lun);
1129 tmf_task.tmf = TMF_ABORT_TASK;
1130 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1131
1132 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1133 &tmf_task);
1134
1135 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1136 HISI_SAS_INT_ABT_CMD, tag);
1137 /*
1138 * If the TMF finds that the IO is not in the device and also
1139 * the internal abort does not succeed, then it is safe to
1140 * free the slot.
1141 * Note: if the internal abort succeeds then the slot
1142 * will have already been completed
1143 */
1144 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1145 if (task->lldd_task) {
1146 spin_lock_irqsave(&hisi_hba->lock, flags);
1147 hisi_sas_do_release_task(hisi_hba, task, slot);
1148 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1149 }
1150 }
1151 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1152 task->task_proto & SAS_PROTOCOL_STP) {
1153 if (task->dev->dev_type == SAS_SATA_DEV) {
1154 hisi_sas_internal_task_abort(hisi_hba, device,
1155 HISI_SAS_INT_ABT_DEV, 0);
1156 hisi_sas_dereg_device(hisi_hba, device);
1157 rc = hisi_sas_softreset_ata_disk(device);
1158 }
1159 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1160 /* SMP */
1161 struct hisi_sas_slot *slot = task->lldd_task;
1162 u32 tag = slot->idx;
1163
1164 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1165 HISI_SAS_INT_ABT_CMD, tag);
1166 if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
1167 spin_lock_irqsave(&hisi_hba->lock, flags);
1168 hisi_sas_do_release_task(hisi_hba, task, slot);
1169 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1170 }
1171 }
1172
1173 out:
1174 if (rc != TMF_RESP_FUNC_COMPLETE)
1175 dev_notice(dev, "abort task: rc=%d\n", rc);
1176 return rc;
1177 }
1178
1179 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1180 {
1181 struct hisi_sas_tmf_task tmf_task;
1182 int rc = TMF_RESP_FUNC_FAILED;
1183
1184 tmf_task.tmf = TMF_ABORT_TASK_SET;
1185 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1186
1187 return rc;
1188 }
1189
1190 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1191 {
1192 int rc = TMF_RESP_FUNC_FAILED;
1193 struct hisi_sas_tmf_task tmf_task;
1194
1195 tmf_task.tmf = TMF_CLEAR_ACA;
1196 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1197
1198 return rc;
1199 }
1200
1201 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1202 {
1203 struct sas_phy *phy = sas_get_local_phy(device);
1204 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1205 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1206 rc = sas_phy_reset(phy, reset_type);
1207 sas_put_local_phy(phy);
1208 msleep(2000);
1209 return rc;
1210 }
1211
/*
 * hisi_sas_I_T_nexus_reset - libsas lldd_I_T_nexus_reset handler
 * @device: target domain device
 *
 * Only acts on a device previously marked HISI_SAS_DEV_EH.  Ordering
 * matters: internal IO is aborted and the device deregistered before
 * the phy reset, and outstanding tasks are released only on success.
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	/* Clear any internally-queued IO before touching the link. */
	hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	/* On success, complete all remaining tasks back to libsas. */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}
1236
/*
 * hisi_sas_lu_reset - libsas lldd_lu_reset handler
 * @device: target domain device
 * @lun: logical unit to reset
 *
 * SATA devices have no LU concept, so they get an internal device abort
 * followed by a phy hard reset; SSP devices get a LOGICAL UNIT RESET TMF.
 * In both cases outstanding tasks are released only after the reset
 * succeeds.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc == TMF_RESP_FUNC_FAILED)
			goto out;
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		/* 1 == hard reset */
		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
1282
1283 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1284 {
1285 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1286
1287 return hisi_sas_controller_reset(hisi_hba);
1288 }
1289
1290 static int hisi_sas_query_task(struct sas_task *task)
1291 {
1292 struct scsi_lun lun;
1293 struct hisi_sas_tmf_task tmf_task;
1294 int rc = TMF_RESP_FUNC_FAILED;
1295
1296 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1297 struct scsi_cmnd *cmnd = task->uldd_task;
1298 struct domain_device *device = task->dev;
1299 struct hisi_sas_slot *slot = task->lldd_task;
1300 u32 tag = slot->idx;
1301
1302 int_to_scsilun(cmnd->device->lun, &lun);
1303 tmf_task.tmf = TMF_QUERY_TASK;
1304 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1305
1306 rc = hisi_sas_debug_issue_ssp_tmf(device,
1307 lun.scsi_lun,
1308 &tmf_task);
1309 switch (rc) {
1310 /* The task is still in Lun, release it then */
1311 case TMF_RESP_FUNC_SUCC:
1312 /* The task is not in Lun or failed, reset the phy */
1313 case TMF_RESP_FUNC_FAILED:
1314 case TMF_RESP_FUNC_COMPLETE:
1315 break;
1316 default:
1317 rc = TMF_RESP_FUNC_FAILED;
1318 break;
1319 }
1320 }
1321 return rc;
1322 }
1323
/*
 * Allocate a slot and deliver an internal abort command to the chip.
 *
 * Called from hisi_sas_internal_task_abort().  @abort_flag selects
 * single-command vs whole-device abort; @task_tag is the tag of the IO
 * to abort (single-command mode only).
 *
 * Locking: hisi_hba->lock guards the slot-index bitmap and the device's
 * slot list; dq->lock is held from delivery-slot allocation until the
 * command has been started so the queue write pointer cannot move
 * underneath us.  The err_out_tag label is only reached with dq->lock
 * held.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	/* No new commands while a controller reset is in progress. */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	/* GFP_ATOMIC: allocating with dq->lock held and IRQs disabled. */
	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
			GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1424
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 *
 * Allocates a slow task, queues the abort via
 * hisi_sas_internal_abort_task_exec() and waits on its completion,
 * bounded by a 110 ms timer (hisi_sas_tmf_timedout).  Returns
 * TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED on timeout/failure, or a negative errno if the
 * command could not even be queued.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	if (!hisi_hba->hw->prep_abort)
		return -EOPNOTSUPP;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			/* Detach the slot so a late completion won't touch
			 * the task we are about to free. */
			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout.\n");
			goto exit;
		}
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
1504
/* libsas lldd_port_formed hook: a phy has been joined to a port. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
1509
1510 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1511 {
1512 phy->phy_attached = 0;
1513 phy->phy_type = 0;
1514 phy->port = NULL;
1515 }
1516
/*
 * hisi_sas_phy_down - handle a phy-down event from the hw layer
 * @hisi_hba: host controller struct
 * @phy_no: index of the phy that went down
 * @rdy: non-zero when the phy is down but ready (link re-established)
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/* Wide port: detach only when no phy of the
				 * port remains up. */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1548
1549 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1550 {
1551 int i;
1552
1553 for (i = 0; i < hisi_hba->queue_count; i++) {
1554 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1555
1556 tasklet_kill(&cq->tasklet);
1557 }
1558 }
1559 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1560
/* Transport template shared by all HiSilicon SAS HBA instances;
 * allocated in hisi_sas_init(). */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
1563
/*
 * SCSI host template for all hisi_sas hosts.  Note that
 * hisi_sas_probe() overrides the per-host can_queue and sg_tablesize
 * values after the Scsi_Host has been allocated.
 */
static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
1586
/* LLDD callbacks registered with libsas via sas_domain_attach_transport(). */
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
};
1601
1602 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1603 {
1604 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1605
1606 for (i = 0; i < hisi_hba->queue_count; i++) {
1607 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1608 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1609
1610 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1611 memset(hisi_hba->cmd_hdr[i], 0, s);
1612 dq->wr_point = 0;
1613
1614 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1615 memset(hisi_hba->complete_hdr[i], 0, s);
1616 cq->rd_point = 0;
1617 }
1618
1619 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1620 memset(hisi_hba->initial_fis, 0, s);
1621
1622 s = max_command_entries * sizeof(struct hisi_sas_iost);
1623 memset(hisi_hba->iost, 0, s);
1624
1625 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1626 memset(hisi_hba->breakpoint, 0, s);
1627
1628 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1629 memset(hisi_hba->sata_breakpoint, 0, s);
1630 }
1631 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1632
/*
 * hisi_sas_alloc - allocate all per-controller DMA and driver memory
 * @hisi_hba: host controller struct
 * @shost: SCSI host (not referenced here)
 *
 * Sets up phys, the device table, delivery/completion queues, ITCT,
 * IOST, breakpoint tables, slot bookkeeping and the reset workqueue.
 * On failure it returns -ENOMEM; callers (see hisi_sas_shost_alloc())
 * invoke hisi_sas_free() to release whatever was already obtained.
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	/* Every device slot starts out free until libsas discovery. */
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	/* Pool for the per-slot command/status buffer tables. */
	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/* Bitmap used for command tag (slot index) allocation. */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	/* Single-threaded workqueue used e.g. for deferred resets. */
	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1741
/*
 * hisi_sas_free - release everything obtained by hisi_sas_alloc()
 * @hisi_hba: host controller struct
 *
 * Safe on a partially-initialised controller: each DMA buffer is freed
 * only if its pointer is set (devm_* allocations are released by the
 * driver core, not here).
 */
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);


	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
1796
1797 void hisi_sas_rst_work_handler(struct work_struct *work)
1798 {
1799 struct hisi_hba *hisi_hba =
1800 container_of(work, struct hisi_hba, rst_work);
1801
1802 hisi_sas_controller_reset(hisi_hba);
1803 }
1804 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1805
/*
 * hisi_sas_get_fw_info - read controller configuration from firmware
 * @hisi_hba: host controller struct
 *
 * Reads the SAS address, phy/queue counts and optional reference clock
 * from device properties (ACPI or DT).  The syscon/reset-register
 * properties are required only for DT-based platform devices.  Returns
 * 0 on success or -ENOENT if a mandatory property is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	/* Reference clock is optional; absence is not an error. */
	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
1873
/*
 * hisi_sas_shost_alloc - allocate a Scsi_Host and initialise hisi_hba
 * @pdev: platform device
 * @hw: hw-layer operations for this controller generation
 *
 * Allocates the Scsi_Host (with the hisi_hba as host private data),
 * reads firmware properties, configures DMA masks, maps registers and
 * performs the controller memory allocation.  Returns the host on
 * success or NULL on failure (the host reference is dropped on error).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA, fall back to 32-bit. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
1923
1924 void hisi_sas_init_add(struct hisi_hba *hisi_hba)
1925 {
1926 int i;
1927
1928 for (i = 0; i < hisi_hba->n_phy; i++)
1929 memcpy(&hisi_hba->phy[i].dev_sas_addr,
1930 hisi_hba->sas_addr,
1931 SAS_ADDR_SIZE);
1932 }
1933 EXPORT_SYMBOL_GPL(hisi_sas_init_add);
1934
1935 int hisi_sas_probe(struct platform_device *pdev,
1936 const struct hisi_sas_hw *hw)
1937 {
1938 struct Scsi_Host *shost;
1939 struct hisi_hba *hisi_hba;
1940 struct device *dev = &pdev->dev;
1941 struct asd_sas_phy **arr_phy;
1942 struct asd_sas_port **arr_port;
1943 struct sas_ha_struct *sha;
1944 int rc, phy_nr, port_nr, i;
1945
1946 shost = hisi_sas_shost_alloc(pdev, hw);
1947 if (!shost)
1948 return -ENOMEM;
1949
1950 sha = SHOST_TO_SAS_HA(shost);
1951 hisi_hba = shost_priv(shost);
1952 platform_set_drvdata(pdev, sha);
1953
1954 phy_nr = port_nr = hisi_hba->n_phy;
1955
1956 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1957 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1958 if (!arr_phy || !arr_port) {
1959 rc = -ENOMEM;
1960 goto err_out_ha;
1961 }
1962
1963 sha->sas_phy = arr_phy;
1964 sha->sas_port = arr_port;
1965 sha->lldd_ha = hisi_hba;
1966
1967 shost->transportt = hisi_sas_stt;
1968 shost->max_id = HISI_SAS_MAX_DEVICES;
1969 shost->max_lun = ~0;
1970 shost->max_channel = 1;
1971 shost->max_cmd_len = 16;
1972 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1973 shost->can_queue = hisi_hba->hw->max_command_entries;
1974 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
1975
1976 sha->sas_ha_name = DRV_NAME;
1977 sha->dev = hisi_hba->dev;
1978 sha->lldd_module = THIS_MODULE;
1979 sha->sas_addr = &hisi_hba->sas_addr[0];
1980 sha->num_phys = hisi_hba->n_phy;
1981 sha->core.shost = hisi_hba->shost;
1982
1983 for (i = 0; i < hisi_hba->n_phy; i++) {
1984 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1985 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1986 }
1987
1988 hisi_sas_init_add(hisi_hba);
1989
1990 rc = scsi_add_host(shost, &pdev->dev);
1991 if (rc)
1992 goto err_out_ha;
1993
1994 rc = sas_register_ha(sha);
1995 if (rc)
1996 goto err_out_register_ha;
1997
1998 rc = hisi_hba->hw->hw_init(hisi_hba);
1999 if (rc)
2000 goto err_out_register_ha;
2001
2002 scsi_scan_host(shost);
2003
2004 return 0;
2005
2006 err_out_register_ha:
2007 scsi_remove_host(shost);
2008 err_out_ha:
2009 hisi_sas_free(hisi_hba);
2010 scsi_host_put(shost);
2011 return rc;
2012 }
2013 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2014
2015 int hisi_sas_remove(struct platform_device *pdev)
2016 {
2017 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2018 struct hisi_hba *hisi_hba = sha->lldd_ha;
2019 struct Scsi_Host *shost = sha->core.shost;
2020
2021 sas_unregister_ha(sha);
2022 sas_remove_host(sha->core.shost);
2023
2024 hisi_sas_free(hisi_hba);
2025 scsi_host_put(shost);
2026 return 0;
2027 }
2028 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2029
2030 static __init int hisi_sas_init(void)
2031 {
2032 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2033 if (!hisi_sas_stt)
2034 return -ENOMEM;
2035
2036 return 0;
2037 }
2038
/* Module exit: release the transport template from hisi_sas_init(). */
static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}
2043
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);