/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
        ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
                                u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                             struct domain_device *device,
                             int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
        return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
        return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
        void *bitmap = hisi_hba->slot_index_tags;

        clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
        hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
        void *bitmap = hisi_hba->slot_index_tags;

        set_bit(slot_idx, bitmap);
}

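/*
 * Allocate a free slot index (tag) from the tag bitmap. Called with
 * hisi_hba->lock held; returns -SAS_QUEUE_FULL when every tag is in use.
 */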
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
        unsigned int index;
        void *bitmap = hisi_hba->slot_index_tags;

        index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
        if (index >= hisi_hba->slot_index_count)
                return -SAS_QUEUE_FULL;
        hisi_sas_slot_index_set(hisi_hba, index);
        *slot_idx = index;
        return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
        int i;

        for (i = 0; i < hisi_hba->slot_index_count; ++i)
                hisi_sas_slot_index_clear(hisi_hba, i);
}

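/*
 * Tear down a completed or aborted slot: unmap the scatterlist, return
 * the command table, status buffer and SGE page to their DMA pools,
 * unlink the slot from the device list and release its tag.
 */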
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                             struct hisi_sas_slot *slot)
{
        struct device *dev = &hisi_hba->pdev->dev;
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;

        if (!slot->task)
                return;

        if (!sas_protocol_ata(task->task_proto))
                if (slot->n_elem)
                        dma_unmap_sg(dev, task->scatter, slot->n_elem,
                                     task->data_dir);

        if (slot->command_table)
                dma_pool_free(hisi_hba->command_table_pool,
                              slot->command_table, slot->command_table_dma);

        if (slot->status_buffer)
                dma_pool_free(hisi_hba->status_buffer_pool,
                              slot->status_buffer, slot->status_buffer_dma);

        if (slot->sge_page)
                dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
                              slot->sge_page_dma);

        list_del_init(&slot->entry);
        task->lldd_task = NULL;
        slot->task = NULL;
        slot->port = NULL;
        hisi_sas_slot_index_free(hisi_hba, slot->idx);
        if (sas_dev)
                atomic64_dec(&sas_dev->running_req);
        /* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

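/* Thin wrappers that dispatch to the hw-specific prep handlers */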
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
                                  struct hisi_sas_slot *slot)
{
        return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
                                  struct hisi_sas_slot *slot, int is_tmf,
                                  struct hisi_sas_tmf_task *tmf)
{
        return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
                                  struct hisi_sas_slot *slot)
{
        return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
                struct hisi_sas_slot *slot,
                int device_id, int abort_flag, int tag_to_abort)
{
        return hisi_hba->hw->prep_abort(hisi_hba, slot,
                        device_id, abort_flag, tag_to_abort);
}

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
        struct hisi_sas_slot *abort_slot =
                container_of(work, struct hisi_sas_slot, abort_slot);
        struct sas_task *task = abort_slot->task;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
        struct scsi_cmnd *cmnd = task->uldd_task;
        struct hisi_sas_tmf_task tmf_task;
        struct scsi_lun lun;
        struct device *dev = &hisi_hba->pdev->dev;
        int tag = abort_slot->idx;
        unsigned long flags;

        if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
                dev_err(dev, "cannot abort slot for non-ssp task\n");
                goto out;
        }

        int_to_scsilun(cmnd->device->lun, &lun);
        tmf_task.tmf = TMF_ABORT_TASK;
        tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

        hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
        /* Do cleanup for this task */
        spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
        if (task->task_done)
                task->task_done(task);
}

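/*
 * Build a delivery-queue slot for @task: map its scatterlist, allocate a
 * tag and a free delivery-queue entry, then fill the command header and
 * command/status buffers via the protocol-specific prep handler. Called
 * with hisi_hba->lock held; on success *pass is incremented so the caller
 * knows to start delivery.
 */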
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
                              int is_tmf, struct hisi_sas_tmf_task *tmf,
                              int *pass)
{
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_sas_port *port;
        struct hisi_sas_slot *slot;
        struct hisi_sas_cmd_hdr *cmd_hdr_base;
        struct asd_sas_port *sas_port = device->port;
        struct device *dev = &hisi_hba->pdev->dev;
        int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
        unsigned long flags;

        if (!sas_port) {
                struct task_status_struct *ts = &task->task_status;

                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_PHY_DOWN;
                /*
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
                if (device->dev_type != SAS_SATA_DEV)
                        task->task_done(task);
                return SAS_PHY_DOWN;
        }

        if (DEV_IS_GONE(sas_dev)) {
                if (sas_dev)
                        dev_info(dev, "task prep: device %llu not ready\n",
                                 sas_dev->device_id);
                else
                        dev_info(dev, "task prep: device %016llx not ready\n",
                                 SAS_ADDR(device->sas_addr));

                return SAS_PHY_DOWN;
        }

        port = to_hisi_sas_port(sas_port);
        if (port && !port->port_attached) {
                dev_info(dev, "task prep: %s port%d not attached to device\n",
                         (sas_protocol_ata(task->task_proto)) ?
                         "SATA/STP" : "SAS",
                         device->port->id);

                return SAS_PHY_DOWN;
        }

        if (!sas_protocol_ata(task->task_proto)) {
                if (task->num_scatter) {
                        n_elem = dma_map_sg(dev, task->scatter,
                                            task->num_scatter, task->data_dir);
                        if (!n_elem) {
                                rc = -ENOMEM;
                                goto prep_out;
                        }
                }
        } else
                n_elem = task->num_scatter;

        if (hisi_hba->hw->slot_index_alloc)
                rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
                                                    device);
        else
                rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
        if (rc)
                goto err_out;
        rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
                                         &dlvry_queue, &dlvry_queue_slot);
        if (rc)
                goto err_out_tag;

        slot = &hisi_hba->slot_info[slot_idx];
        memset(slot, 0, sizeof(struct hisi_sas_slot));

        slot->idx = slot_idx;
        slot->n_elem = n_elem;
        slot->dlvry_queue = dlvry_queue;
        slot->dlvry_queue_slot = dlvry_queue_slot;
        cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
        slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
        slot->task = task;
        slot->port = port;
        task->lldd_task = slot;
        INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

        slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
                                             GFP_ATOMIC,
                                             &slot->status_buffer_dma);
        if (!slot->status_buffer) {
                rc = -ENOMEM;
                goto err_out_slot_buf;
        }
        memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);

        slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
                                             GFP_ATOMIC,
                                             &slot->command_table_dma);
        if (!slot->command_table) {
                rc = -ENOMEM;
                goto err_out_status_buf;
        }
        memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
        memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

        switch (task->task_proto) {
        case SAS_PROTOCOL_SMP:
                rc = hisi_sas_task_prep_smp(hisi_hba, slot);
                break;
        case SAS_PROTOCOL_SSP:
                rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
                break;
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
                rc = hisi_sas_task_prep_ata(hisi_hba, slot);
                break;
        default:
                dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
                        task->task_proto);
                rc = -EINVAL;
                break;
        }

        if (rc) {
                dev_err(dev, "task prep: rc = 0x%x\n", rc);
                if (slot->sge_page)
                        goto err_out_sge;
                goto err_out_command_table;
        }

        list_add_tail(&slot->entry, &sas_dev->list);
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags |= SAS_TASK_AT_INITIATOR;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        hisi_hba->slot_prep = slot;

        atomic64_inc(&sas_dev->running_req);
        ++(*pass);

        return 0;

err_out_sge:
        dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
                      slot->sge_page_dma);
err_out_command_table:
        dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
                      slot->command_table_dma);
err_out_status_buf:
        dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
                      slot->status_buffer_dma);
err_out_slot_buf:
        /* Nothing to be done */
err_out_tag:
        hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
        dev_err(dev, "task prep: failed[%d]!\n", rc);
        if (!sas_protocol_ata(task->task_proto))
                if (n_elem)
                        dma_unmap_sg(dev, task->scatter, n_elem,
                                     task->data_dir);
prep_out:
        return rc;
}

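/*
 * Queue a task to the HBA: prepare the slot under hisi_hba->lock and,
 * if anything was prepared, ring the delivery-queue doorbell.
 */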
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
                              int is_tmf, struct hisi_sas_tmf_task *tmf)
{
        u32 rc;
        u32 pass = 0;
        unsigned long flags;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
        struct device *dev = &hisi_hba->pdev->dev;

        if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
                return -EINVAL;

        /* protect task_prep and start_delivery sequence */
        spin_lock_irqsave(&hisi_hba->lock, flags);
        rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
        if (rc)
                dev_err(dev, "task exec: failed[%d]!\n", rc);

        if (likely(pass))
                hisi_hba->hw->start_delivery(hisi_hba);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);

        return rc;
}

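/*
 * Tell libsas that OOB has completed on @phy_no and hand it the received
 * identify frame (SAS) or initial FIS (SATA), then report the port event.
 */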
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_ha_struct *sas_ha;

        if (!phy->phy_attached)
                return;

        sas_ha = &hisi_hba->sha;
        sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

        if (sas_phy->phy) {
                struct sas_phy *sphy = sas_phy->phy;

                sphy->negotiated_linkrate = sas_phy->linkrate;
                sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
                sphy->maximum_linkrate_hw =
                        hisi_hba->hw->phy_get_max_linkrate();
                if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
                        sphy->minimum_linkrate = phy->minimum_linkrate;

                if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
                        sphy->maximum_linkrate = phy->maximum_linkrate;
        }

        if (phy->phy_type & PORT_TYPE_SAS) {
                struct sas_identify_frame *id;

                id = (struct sas_identify_frame *)phy->frame_rcvd;
                id->dev_type = phy->identify.device_type;
                id->initiator_bits = SAS_PROTOCOL_ALL;
                id->target_bits = phy->identify.target_port_protocols;
        } else if (phy->phy_type & PORT_TYPE_SATA) {
                /* Nothing to do */
        }

        sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
        sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

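/*
 * Claim the first unused entry in hisi_hba->devices[] for @device.
 * Returns NULL when all HISI_SAS_MAX_DEVICES entries are taken.
 */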
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct hisi_sas_device *sas_dev = NULL;
        int i;

        spin_lock(&hisi_hba->lock);
        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
                        hisi_hba->devices[i].device_id = i;
                        sas_dev = &hisi_hba->devices[i];
                        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
                        sas_dev->dev_type = device->dev_type;
                        sas_dev->hisi_hba = hisi_hba;
                        sas_dev->sas_device = device;
                        INIT_LIST_HEAD(&hisi_hba->devices[i].list);
                        break;
                }
        }
        spin_unlock(&hisi_hba->lock);

        return sas_dev;
}

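/*
 * libsas ->lldd_dev_found hook: allocate a device slot, program the ITCT
 * entry and, for expander-attached devices, record the attached phy.
 */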
static int hisi_sas_dev_found(struct domain_device *device)
{
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct domain_device *parent_dev = device->parent;
        struct hisi_sas_device *sas_dev;
        struct device *dev = &hisi_hba->pdev->dev;

        if (hisi_hba->hw->alloc_dev)
                sas_dev = hisi_hba->hw->alloc_dev(device);
        else
                sas_dev = hisi_sas_alloc_dev(device);
        if (!sas_dev) {
                dev_err(dev, "fail alloc dev: max support %d devices\n",
                        HISI_SAS_MAX_DEVICES);
                return -EINVAL;
        }

        device->lldd_dev = sas_dev;
        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
                int phy_no;
                u8 phy_num = parent_dev->ex_dev.num_phys;
                struct ex_phy *phy;

                for (phy_no = 0; phy_no < phy_num; phy_no++) {
                        phy = &parent_dev->ex_dev.ex_phy[phy_no];
                        if (SAS_ADDR(phy->attached_sas_addr) ==
                                SAS_ADDR(device->sas_addr)) {
                                sas_dev->attached_phy = phy_no;
                                break;
                        }
                }

                if (phy_no == phy_num) {
                        dev_info(dev, "dev found: no attached "
                                 "dev:%016llx at ex:%016llx\n",
                                 SAS_ADDR(device->sas_addr),
                                 SAS_ADDR(parent_dev->sas_addr));
                        return -EINVAL;
                }
        }

        return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
        struct domain_device *dev = sdev_to_domain_dev(sdev);
        int ret = sas_slave_configure(sdev);

        if (ret)
                return ret;
        if (!dev_is_sata(dev))
                sas_change_queue_depth(sdev, 64);

        return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);

        hisi_hba->hw->phys_init(hisi_hba);
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);
        struct sas_ha_struct *sha = &hisi_hba->sha;

        /* Wait for PHY up interrupt to occur */
        if (time < HZ)
                return 0;

        sas_drain_work(sha);
        return 1;
}

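/* Deferred PHY-up handling: runs in process context as sl_notify() sleeps */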
static void hisi_sas_phyup_work(struct work_struct *work)
{
        struct hisi_sas_phy *phy =
                container_of(work, struct hisi_sas_phy, phyup_ws);
        struct hisi_hba *hisi_hba = phy->hisi_hba;
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        int phy_no = sas_phy->id;

        hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
        hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        phy->hisi_hba = hisi_hba;
        phy->port = NULL;
        init_timer(&phy->timer);
        sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
        sas_phy->tproto = 0;
        sas_phy->type = PHY_TYPE_PHYSICAL;
        sas_phy->role = PHY_ROLE_INITIATOR;
        sas_phy->oob_mode = OOB_NOT_CONNECTED;
        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
        sas_phy->id = phy_no;
        sas_phy->sas_addr = &hisi_hba->sas_addr[0];
        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
        sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
        sas_phy->lldd_phy = phy;

        INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
        struct sas_ha_struct *sas_ha = sas_phy->ha;
        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
        struct hisi_sas_phy *phy = sas_phy->lldd_phy;
        struct asd_sas_port *sas_port = sas_phy->port;
        struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
        unsigned long flags;

        if (!sas_port)
                return;

        spin_lock_irqsave(&hisi_hba->lock, flags);
        port->port_attached = 1;
        port->id = phy->port_id;
        phy->port = port;
        sas_port->lldd_port = port;
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

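/*
 * Complete @task as aborted and free its slot. Callers hold
 * hisi_hba->lock (see hisi_sas_release_task() below).
 */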
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba,
                                     struct sas_task *task,
                                     struct hisi_sas_slot *slot)
{
        struct task_status_struct *ts;
        unsigned long flags;

        if (!task)
                return;

        ts = &task->task_status;

        ts->resp = SAS_TASK_COMPLETE;
        ts->stat = SAS_ABORTED_TASK;
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags &=
                ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
                                  struct domain_device *device)
{
        struct hisi_sas_slot *slot, *slot2;
        struct hisi_sas_device *sas_dev = device->lldd_dev;

        list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
                hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
        struct hisi_sas_device *sas_dev;
        struct domain_device *device;
        int i;

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                sas_dev = &hisi_hba->devices[i];
                device = sas_dev->sas_device;

                if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
                    !device)
                        continue;

                hisi_sas_release_task(hisi_hba, device);
        }
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = &hisi_hba->pdev->dev;
        u64 dev_id = sas_dev->device_id;

        dev_info(dev, "found dev[%lld:%x] is gone\n",
                 sas_dev->device_id, sas_dev->dev_type);

        hisi_sas_internal_task_abort(hisi_hba, device,
                                     HISI_SAS_INT_ABT_DEV, 0);

        hisi_hba->hw->free_device(hisi_hba, sas_dev);
        device->lldd_dev = NULL;
        memset(sas_dev, 0, sizeof(*sas_dev));
        sas_dev->device_id = dev_id;
        sas_dev->dev_type = SAS_PHY_UNUSED;
        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
        return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                                void *funcdata)
{
        struct sas_ha_struct *sas_ha = sas_phy->ha;
        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
        int phy_no = sas_phy->id;

        switch (func) {
        case PHY_FUNC_HARD_RESET:
                hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
                break;

        case PHY_FUNC_LINK_RESET:
                hisi_hba->hw->phy_disable(hisi_hba, phy_no);
                msleep(100);
                hisi_hba->hw->phy_enable(hisi_hba, phy_no);
                break;

        case PHY_FUNC_DISABLE:
                hisi_hba->hw->phy_disable(hisi_hba, phy_no);
                break;

        case PHY_FUNC_SET_LINK_RATE:
                hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
                break;

        case PHY_FUNC_RELEASE_SPINUP_HOLD:
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
        if (!del_timer(&task->slow_task->timer))
                return;
        complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(unsigned long data)
{
        struct sas_task *task = (struct sas_task *)data;

        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
        complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
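/*
 * Issue an internal TMF on a slow task: an SSP TMF frame for SAS devices
 * or a device-control FIS for SATA. Waits up to TASK_TIMEOUT seconds per
 * attempt and retries up to TASK_RETRY times on failure.
 */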
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                                           void *parameter, u32 para_len,
                                           struct hisi_sas_tmf_task *tmf)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
        struct device *dev = &hisi_hba->pdev->dev;
        struct sas_task *task;
        int res, retry;

        for (retry = 0; retry < TASK_RETRY; retry++) {
                task = sas_alloc_slow_task(GFP_KERNEL);
                if (!task)
                        return -ENOMEM;

                task->dev = device;
                task->task_proto = device->tproto;

                if (dev_is_sata(device)) {
                        task->ata_task.device_control_reg_update = 1;
                        memcpy(&task->ata_task.fis, parameter, para_len);
                } else {
                        memcpy(&task->ssp_task, parameter, para_len);
                }
                task->task_done = hisi_sas_task_done;

                task->slow_task->timer.data = (unsigned long) task;
                task->slow_task->timer.function = hisi_sas_tmf_timedout;
                task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);

                res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

                if (res) {
                        del_timer(&task->slow_task->timer);
                        dev_err(dev, "abort tmf: executing internal task failed: %d\n",
                                res);
                        goto ex_err;
                }

                wait_for_completion(&task->slow_task->completion);
                res = TMF_RESP_FUNC_FAILED;
                /* Even if the TMF timed out, return directly. */
                if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
                        if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                                dev_err(dev, "abort tmf: TMF task timeout\n");
                                goto ex_err;
                        }
                }

                if (task->task_status.resp == SAS_TASK_COMPLETE &&
                    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
                        res = TMF_RESP_FUNC_COMPLETE;
                        break;
                }

                if (task->task_status.resp == SAS_TASK_COMPLETE &&
                    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
                        res = TMF_RESP_FUNC_SUCC;
                        break;
                }

                if (task->task_status.resp == SAS_TASK_COMPLETE &&
                    task->task_status.stat == SAS_DATA_UNDERRUN) {
                        /* no error, but return the number of bytes of
                         * underrun
                         */
                        dev_warn(dev, "abort tmf: task to dev %016llx "
                                 "resp: 0x%x sts 0x%x underrun\n",
                                 SAS_ADDR(device->sas_addr),
                                 task->task_status.resp,
                                 task->task_status.stat);
                        res = task->task_status.residual;
                        break;
                }

                if (task->task_status.resp == SAS_TASK_COMPLETE &&
                    task->task_status.stat == SAS_DATA_OVERRUN) {
                        dev_warn(dev, "abort tmf: blocked task error\n");
                        res = -EMSGSIZE;
                        break;
                }

                dev_warn(dev, "abort tmf: task to dev "
                         "%016llx resp: 0x%x status 0x%x\n",
                         SAS_ADDR(device->sas_addr), task->task_status.resp,
                         task->task_status.stat);
                sas_free_task(task);
                task = NULL;
        }
ex_err:
        if (retry == TASK_RETRY)
                dev_warn(dev, "abort tmf: executing internal task failed!\n");
        sas_free_task(task);
        return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
                bool reset, int pmp, u8 *fis)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);
        if (reset)
                tf.ctl |= ATA_SRST;
        else
                tf.ctl &= ~ATA_SRST;
        tf.command = ATA_CMD_DEV_RESET;
        ata_tf_to_fis(&tf, pmp, 0, fis);
}

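/*
 * Software-reset a SATA disk by sending an SRST-assert FIS followed by an
 * SRST-deassert FIS on each link; on success release any queued tasks.
 */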
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
        u8 fis[20] = {0};
        struct ata_port *ap = device->sata_dev.ap;
        struct ata_link *link;
        int rc = TMF_RESP_FUNC_FAILED;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = &hisi_hba->pdev->dev;
        int s = sizeof(struct host_to_dev_fis);
        unsigned long flags;

        ata_for_each_link(link, ap, EDGE) {
                int pmp = sata_srst_pmp(link);

                hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
                rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
                if (rc != TMF_RESP_FUNC_COMPLETE)
                        break;
        }

        if (rc == TMF_RESP_FUNC_COMPLETE) {
                ata_for_each_link(link, ap, EDGE) {
                        int pmp = sata_srst_pmp(link);

                        hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
                        rc = hisi_sas_exec_internal_tmf_task(device, fis,
                                                             s, NULL);
                        if (rc != TMF_RESP_FUNC_COMPLETE)
                                dev_err(dev, "ata disk de-reset failed\n");
                }
        } else {
                dev_err(dev, "ata disk reset failed\n");
        }

        if (rc == TMF_RESP_FUNC_COMPLETE) {
                spin_lock_irqsave(&hisi_hba->lock, flags);
                hisi_sas_release_task(hisi_hba, device);
                spin_unlock_irqrestore(&hisi_hba->lock, flags);
        }

        return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
                                u8 *lun, struct hisi_sas_tmf_task *tmf)
{
        struct sas_ssp_task ssp_task;

        if (!(device->tproto & SAS_PROTOCOL_SSP))
                return TMF_RESP_FUNC_ESUPP;

        memcpy(ssp_task.LUN, lun, 8);

        return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
                                               sizeof(ssp_task), tmf);
}

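/*
 * Soft-reset the controller (if the hw supports it), abort all in-flight
 * tasks and notify libsas of the reset (HAE_RESET). HISI_SAS_RESET_BIT
 * gates re-entry and fails queued IO while the reset is in progress.
 */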
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
        int rc;

        if (!hisi_hba->hw->soft_reset)
                return -1;

        if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
                struct device *dev = &hisi_hba->pdev->dev;
                struct sas_ha_struct *sas_ha = &hisi_hba->sha;
                unsigned long flags;

                dev_dbg(dev, "controller reset begins!\n");
                scsi_block_requests(hisi_hba->shost);
                rc = hisi_hba->hw->soft_reset(hisi_hba);
                if (rc) {
                        dev_warn(dev, "controller reset failed (%d)\n", rc);
                        goto out;
                }
                spin_lock_irqsave(&hisi_hba->lock, flags);
                hisi_sas_release_tasks(hisi_hba);
                spin_unlock_irqrestore(&hisi_hba->lock, flags);

                sas_ha->notify_ha_event(sas_ha, HAE_RESET);
                dev_dbg(dev, "controller reset successful!\n");
        } else
                return -1;

out:
        scsi_unblock_requests(hisi_hba->shost);
        clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
        return rc;
}

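/*
 * libsas ->lldd_abort_task hook: for SSP, issue an ABORT TASK TMF plus an
 * internal abort of the tag; for SATA/STP, abort the device's commands
 * and soft-reset the disk; for SMP, just internally abort the slot.
 */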
static int hisi_sas_abort_task(struct sas_task *task)
{
        struct scsi_lun lun;
        struct hisi_sas_tmf_task tmf_task;
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
        struct device *dev = &hisi_hba->pdev->dev;
        int rc = TMF_RESP_FUNC_FAILED;
        unsigned long flags;

        if (!sas_dev) {
                dev_warn(dev, "Device has been removed\n");
                return TMF_RESP_FUNC_FAILED;
        }

        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                rc = TMF_RESP_FUNC_COMPLETE;
                goto out;
        }

        sas_dev->dev_status = HISI_SAS_DEV_EH;
        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
                struct scsi_cmnd *cmnd = task->uldd_task;
                struct hisi_sas_slot *slot = task->lldd_task;
                u32 tag = slot->idx;
                int rc2;

                int_to_scsilun(cmnd->device->lun, &lun);
                tmf_task.tmf = TMF_ABORT_TASK;
                tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

                rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
                                                  &tmf_task);

                rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
                                                   HISI_SAS_INT_ABT_CMD, tag);
                /*
                 * If the TMF finds that the IO is not in the device and also
                 * the internal abort does not succeed, then it is safe to
                 * free the slot.
                 * Note: if the internal abort succeeds then the slot
                 * will have already been completed
                 */
                if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
                        if (task->lldd_task) {
                                spin_lock_irqsave(&hisi_hba->lock, flags);
                                hisi_sas_do_release_task(hisi_hba, task, slot);
                                spin_unlock_irqrestore(&hisi_hba->lock, flags);
                        }
                }
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                   task->task_proto & SAS_PROTOCOL_STP) {
                if (task->dev->dev_type == SAS_SATA_DEV) {
                        hisi_sas_internal_task_abort(hisi_hba, device,
                                                     HISI_SAS_INT_ABT_DEV, 0);
                        rc = hisi_sas_softreset_ata_disk(device);
                }
        } else if (task->task_proto & SAS_PROTOCOL_SMP) {
                /* SMP */
                struct hisi_sas_slot *slot = task->lldd_task;
                u32 tag = slot->idx;

                hisi_sas_internal_task_abort(hisi_hba, device,
                                             HISI_SAS_INT_ABT_CMD, tag);
        }

out:
        if (rc != TMF_RESP_FUNC_COMPLETE)
                dev_notice(dev, "abort task: rc=%d\n", rc);
        return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
        struct hisi_sas_tmf_task tmf_task;
        int rc = TMF_RESP_FUNC_FAILED;

        tmf_task.tmf = TMF_ABORT_TASK_SET;
        rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

        return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
        int rc = TMF_RESP_FUNC_FAILED;
        struct hisi_sas_tmf_task tmf_task;

        tmf_task.tmf = TMF_CLEAR_ACA;
        rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

        return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
        struct sas_phy *phy = sas_get_local_phy(device);
        int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
                        (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
        rc = sas_phy_reset(phy, reset_type);
        sas_put_local_phy(phy);
        msleep(2000);
        return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        unsigned long flags;
        int rc = TMF_RESP_FUNC_FAILED;

        if (sas_dev->dev_status != HISI_SAS_DEV_EH)
                return TMF_RESP_FUNC_FAILED;
        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

        rc = hisi_sas_debug_I_T_nexus_reset(device);

        if (rc == TMF_RESP_FUNC_COMPLETE) {
                spin_lock_irqsave(&hisi_hba->lock, flags);
                hisi_sas_release_task(hisi_hba, device);
                spin_unlock_irqrestore(&hisi_hba->lock, flags);
        }
        return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = &hisi_hba->pdev->dev;
        unsigned long flags;
        int rc = TMF_RESP_FUNC_FAILED;

        sas_dev->dev_status = HISI_SAS_DEV_EH;
        if (dev_is_sata(device)) {
                struct sas_phy *phy;

                /* Clear internal IO and then hardreset */
                rc = hisi_sas_internal_task_abort(hisi_hba, device,
                                                  HISI_SAS_INT_ABT_DEV, 0);
                if (rc == TMF_RESP_FUNC_FAILED)
                        goto out;

                phy = sas_get_local_phy(device);

                rc = sas_phy_reset(phy, 1);

                if (rc == 0) {
                        spin_lock_irqsave(&hisi_hba->lock, flags);
                        hisi_sas_release_task(hisi_hba, device);
                        spin_unlock_irqrestore(&hisi_hba->lock, flags);
                }
                sas_put_local_phy(phy);
        } else {
                struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

                rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
                if (rc == TMF_RESP_FUNC_COMPLETE) {
                        spin_lock_irqsave(&hisi_hba->lock, flags);
                        hisi_sas_release_task(hisi_hba, device);
                        spin_unlock_irqrestore(&hisi_hba->lock, flags);
                }
        }
out:
        dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
                sas_dev->device_id, rc);
        return rc;
}

static int hisi_sas_query_task(struct sas_task *task)
{
        struct scsi_lun lun;
        struct hisi_sas_tmf_task tmf_task;
        int rc = TMF_RESP_FUNC_FAILED;

        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
                struct scsi_cmnd *cmnd = task->uldd_task;
                struct domain_device *device = task->dev;
                struct hisi_sas_slot *slot = task->lldd_task;
                u32 tag = slot->idx;

                int_to_scsilun(cmnd->device->lun, &lun);
                tmf_task.tmf = TMF_QUERY_TASK;
                tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

                rc = hisi_sas_debug_issue_ssp_tmf(device,
                                                  lun.scsi_lun,
                                                  &tmf_task);
                switch (rc) {
                /* The task is still in Lun, release it then */
                case TMF_RESP_FUNC_SUCC:
                /* The task is not in Lun or failed, reset the phy */
                case TMF_RESP_FUNC_FAILED:
                case TMF_RESP_FUNC_COMPLETE:
                        break;
                default:
                        rc = TMF_RESP_FUNC_FAILED;
                        break;
                }
        }
        return rc;
}

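/*
 * Build and deliver an internal abort command: allocate a tag and queue
 * slot, fill the abort command header via hw->prep_abort() and start
 * delivery. Called with hisi_hba->lock held.
 */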
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
                                  struct sas_task *task, int abort_flag,
                                  int task_tag)
{
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct device *dev = &hisi_hba->pdev->dev;
        struct hisi_sas_port *port;
        struct hisi_sas_slot *slot;
        struct asd_sas_port *sas_port = device->port;
        struct hisi_sas_cmd_hdr *cmd_hdr_base;
        int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
        unsigned long flags;

        if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
                return -EINVAL;

        if (!device->port)
                return -1;

        port = to_hisi_sas_port(sas_port);

        /* simply get a slot and send abort command */
        rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
        if (rc)
                goto err_out;
        rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
                                         &dlvry_queue, &dlvry_queue_slot);
        if (rc)
                goto err_out_tag;

        slot = &hisi_hba->slot_info[slot_idx];
        memset(slot, 0, sizeof(struct hisi_sas_slot));

        slot->idx = slot_idx;
        slot->n_elem = n_elem;
        slot->dlvry_queue = dlvry_queue;
        slot->dlvry_queue_slot = dlvry_queue_slot;
        cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
        slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
        slot->task = task;
        slot->port = port;
        task->lldd_task = slot;

        memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

        rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
                                      abort_flag, task_tag);
        if (rc)
                goto err_out_tag;

        list_add_tail(&slot->entry, &sas_dev->list);
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags |= SAS_TASK_AT_INITIATOR;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        hisi_hba->slot_prep = slot;

        atomic64_inc(&sas_dev->running_req);

        /* send abort command to our chip */
        hisi_hba->hw->start_delivery(hisi_hba);

        return 0;

err_out_tag:
        hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
        dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

        return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                             struct domain_device *device,
                             int abort_flag, int tag)
{
        struct sas_task *task;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct device *dev = &hisi_hba->pdev->dev;
        int res;
        unsigned long flags;

        if (!hisi_hba->hw->prep_abort)
                return -EOPNOTSUPP;

        task = sas_alloc_slow_task(GFP_KERNEL);
        if (!task)
                return -ENOMEM;

        task->dev = device;
        task->task_proto = device->tproto;
        task->task_done = hisi_sas_task_done;
        task->slow_task->timer.data = (unsigned long)task;
        task->slow_task->timer.function = hisi_sas_tmf_timedout;
        task->slow_task->timer.expires = jiffies + 20*HZ;
        add_timer(&task->slow_task->timer);

        /* Lock as we are alloc'ing a slot, which cannot be interrupted */
        spin_lock_irqsave(&hisi_hba->lock, flags);
        res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
                                                task, abort_flag, tag);
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
        if (res) {
                del_timer(&task->slow_task->timer);
                dev_err(dev, "internal task abort: executing internal task failed: %d\n",
                        res);
                goto exit;
        }
        wait_for_completion(&task->slow_task->completion);
        res = TMF_RESP_FUNC_FAILED;

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
                res = TMF_RESP_FUNC_COMPLETE;
                goto exit;
        }

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_SUCC) {
                res = TMF_RESP_FUNC_SUCC;
                goto exit;
        }

        /* Internal abort timed out */
        if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
                if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                        dev_err(dev, "internal task abort: timeout.\n");
                }
        }

exit:
        dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
                "resp: 0x%x sts 0x%x\n",
                SAS_ADDR(device->sas_addr),
                task,
                task->task_status.resp, /* 0 is complete, -1 is undelivered */
                task->task_status.stat);
        sas_free_task(task);

        return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
        hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
        phy->phy_attached = 0;
        phy->phy_type = 0;
        phy->port = NULL;
}

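/*
 * Handle a PHY-down event: if @rdy the link bounced but is usable again,
 * so re-report the received frame; otherwise tell libsas the signal was
 * lost and detach the phy (and mark the port detached if no phy remains).
 */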
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_ha_struct *sas_ha = &hisi_hba->sha;

        if (rdy) {
                /* Phy down but ready */
                hisi_sas_bytes_dmaed(hisi_hba, phy_no);
                hisi_sas_port_notify_formed(sas_phy);
        } else {
                struct hisi_sas_port *port = phy->port;

                /* Phy down and not ready */
                sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
                sas_phy_disconnected(sas_phy);

                if (port) {
                        if (phy->phy_type & PORT_TYPE_SAS) {
                                int port_id = port->id;

                                if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
                                                                       port_id))
                                        port->port_attached = 0;
                        } else if (phy->phy_type & PORT_TYPE_SATA)
                                port->port_attached = 0;
                }
                hisi_sas_phy_disconnected(phy);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
                              u32 state)
{
        struct sas_ha_struct *sas_ha = &hisi_hba->sha;
        int phy_no;

        for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
                struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
                struct asd_sas_phy *sas_phy = &phy->sas_phy;
                struct asd_sas_port *sas_port = sas_phy->port;
                struct domain_device *dev;

                if (sas_phy->enabled) {
                        /* Report PHY state change to libsas */
                        if (state & (1 << phy_no))
                                continue;

                        if (old_state & (1 << phy_no))
                                /* PHY down but was up before */
                                hisi_sas_phy_down(hisi_hba, phy_no, 0);
                }
                if (!sas_port)
                        continue;
                dev = sas_port->port_dev;

                if (DEV_IS_EXPANDER(dev->dev_type))
                        sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);

static struct scsi_transport_template *hisi_sas_stt;

static struct scsi_host_template hisi_sas_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .queuecommand = sas_queuecommand,
        .target_alloc = sas_target_alloc,
        .slave_configure = hisi_sas_slave_configure,
        .scan_finished = hisi_sas_scan_finished,
        .scan_start = hisi_sas_scan_start,
        .change_queue_depth = sas_change_queue_depth,
        .bios_param = sas_bios_param,
        .can_queue = 1,
        .this_id = -1,
        .sg_tablesize = SG_ALL,
        .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
        .use_clustering = ENABLE_CLUSTERING,
        .eh_device_reset_handler = sas_eh_device_reset_handler,
        .eh_bus_reset_handler = sas_eh_bus_reset_handler,
        .target_destroy = sas_target_destroy,
        .ioctl = sas_ioctl,
};

static struct sas_domain_function_template hisi_sas_transport_ops = {
        .lldd_dev_found = hisi_sas_dev_found,
        .lldd_dev_gone = hisi_sas_dev_gone,
        .lldd_execute_task = hisi_sas_queue_command,
        .lldd_control_phy = hisi_sas_control_phy,
        .lldd_abort_task = hisi_sas_abort_task,
        .lldd_abort_task_set = hisi_sas_abort_task_set,
        .lldd_clear_aca = hisi_sas_clear_aca,
        .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
        .lldd_lu_reset = hisi_sas_lu_reset,
        .lldd_query_task = hisi_sas_query_task,
        .lldd_port_formed = hisi_sas_port_formed,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];

                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                memset(hisi_hba->cmd_hdr[i], 0, s);
                dq->wr_point = 0;

                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                memset(hisi_hba->complete_hdr[i], 0, s);
                cq->rd_point = 0;
        }

        s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
        memset(hisi_hba->initial_fis, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        memset(hisi_hba->iost, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        memset(hisi_hba->breakpoint, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
        memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

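/*
 * Allocate all DMA-coherent queue memory (command/completion headers,
 * ITCT, IOST, breakpoint and initial-FIS tables), the per-slot DMA pools,
 * tag bitmap and bookkeeping arrays, plus the driver workqueue.
 */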
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
        struct platform_device *pdev = hisi_hba->pdev;
        struct device *dev = &pdev->dev;
        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

        spin_lock_init(&hisi_hba->lock);
        for (i = 0; i < hisi_hba->n_phy; i++) {
                hisi_sas_phy_init(hisi_hba, i);
                hisi_hba->port[i].port_attached = 0;
                hisi_hba->port[i].id = -1;
        }

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
                hisi_hba->devices[i].device_id = i;
                hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
        }

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];

                /* Completion queue structure */
                cq->id = i;
                cq->hisi_hba = hisi_hba;

                /* Delivery queue structure */
                dq->id = i;
                dq->hisi_hba = hisi_hba;

                /* Delivery queue */
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
                                &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
                if (!hisi_hba->cmd_hdr[i])
                        goto err_out;

                /* Completion queue */
                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
                                &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
                if (!hisi_hba->complete_hdr[i])
                        goto err_out;
        }

        s = HISI_SAS_STATUS_BUF_SZ;
        hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
                                                       dev, s, 16, 0);
        if (!hisi_hba->status_buffer_pool)
                goto err_out;

        s = HISI_SAS_COMMAND_TABLE_SZ;
        hisi_hba->command_table_pool = dma_pool_create("command_table",
                                                       dev, s, 16, 0);
        if (!hisi_hba->command_table_pool)
                goto err_out;

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
                                            GFP_KERNEL);
        if (!hisi_hba->itct)
                goto err_out;

        memset(hisi_hba->itct, 0, s);

        hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
                                           sizeof(struct hisi_sas_slot),
                                           GFP_KERNEL);
        if (!hisi_hba->slot_info)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
                                            GFP_KERNEL);
        if (!hisi_hba->iost)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
                        &hisi_hba->breakpoint_dma, GFP_KERNEL);
        if (!hisi_hba->breakpoint)
                goto err_out;

        hisi_hba->slot_index_count = max_command_entries;
        s = hisi_hba->slot_index_count / BITS_PER_BYTE;
        hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
        if (!hisi_hba->slot_index_tags)
                goto err_out;

        hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
                        sizeof(struct hisi_sas_sge_page), 16, 0);
        if (!hisi_hba->sge_page_pool)
                goto err_out;

        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
                        &hisi_hba->initial_fis_dma, GFP_KERNEL);
        if (!hisi_hba->initial_fis)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
        hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
                        &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
        if (!hisi_hba->sata_breakpoint)
                goto err_out;
        hisi_sas_init_mem(hisi_hba);

        hisi_sas_slot_index_init(hisi_hba);

        hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
        if (!hisi_hba->wq) {
                dev_err(dev, "sas_alloc: failed to create workqueue\n");
                goto err_out;
        }

        return 0;
err_out:
        return -ENOMEM;
}

static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
        struct device *dev = &hisi_hba->pdev->dev;
        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

        for (i = 0; i < hisi_hba->queue_count; i++) {
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                if (hisi_hba->cmd_hdr[i])
                        dma_free_coherent(dev, s,
                                          hisi_hba->cmd_hdr[i],
                                          hisi_hba->cmd_hdr_dma[i]);

                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                if (hisi_hba->complete_hdr[i])
                        dma_free_coherent(dev, s,
                                          hisi_hba->complete_hdr[i],
                                          hisi_hba->complete_hdr_dma[i]);
        }

        dma_pool_destroy(hisi_hba->status_buffer_pool);
        dma_pool_destroy(hisi_hba->command_table_pool);
        dma_pool_destroy(hisi_hba->sge_page_pool);

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        if (hisi_hba->itct)
                dma_free_coherent(dev, s,
                                  hisi_hba->itct, hisi_hba->itct_dma);

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        if (hisi_hba->iost)
                dma_free_coherent(dev, s,
                                  hisi_hba->iost, hisi_hba->iost_dma);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        if (hisi_hba->breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->breakpoint,
                                  hisi_hba->breakpoint_dma);

        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        if (hisi_hba->initial_fis)
                dma_free_coherent(dev, s,
                                  hisi_hba->initial_fis,
                                  hisi_hba->initial_fis_dma);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
        if (hisi_hba->sata_breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->sata_breakpoint,
                                  hisi_hba->sata_breakpoint_dma);

        if (hisi_hba->wq)
                destroy_workqueue(hisi_hba->wq);
}

static void hisi_sas_rst_work_handler(struct work_struct *work)
{
        struct hisi_hba *hisi_hba =
                container_of(work, struct hisi_hba, rst_work);

        hisi_sas_controller_reset(hisi_hba);
}

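/*
 * Allocate the Scsi_Host and parse the platform/DT properties (SAS
 * address, syscon regs, reference clock, phy and queue counts), map the
 * registers and allocate the driver memory.
 */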
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                                              const struct hisi_sas_hw *hw)
{
        struct resource *res;
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        struct device_node *np = pdev->dev.of_node;
        struct clk *refclk;

        shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
        if (!shost) {
                dev_err(dev, "scsi host alloc failed\n");
                return NULL;
        }
        hisi_hba = shost_priv(shost);

        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
        hisi_hba->hw = hw;
        hisi_hba->pdev = pdev;
        hisi_hba->shost = shost;
        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

        init_timer(&hisi_hba->timer);

        if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                          SAS_ADDR_SIZE))
                goto err_out;

        if (np) {
                hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                        "hisilicon,sas-syscon");
                if (IS_ERR(hisi_hba->ctrl))
                        goto err_out;

                if (device_property_read_u32(dev, "ctrl-reset-reg",
                                             &hisi_hba->ctrl_reset_reg))
                        goto err_out;

                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                             &hisi_hba->ctrl_reset_sts_reg))
                        goto err_out;

                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                             &hisi_hba->ctrl_clock_ena_reg))
                        goto err_out;
        }

        refclk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(refclk))
                dev_dbg(dev, "no ref clk property\n");
        else
                hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

        if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
                goto err_out;

        if (device_property_read_u32(dev, "queue-count",
                                     &hisi_hba->queue_count))
                goto err_out;

        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                dev_err(dev, "No usable DMA addressing method\n");
                goto err_out;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hisi_hba->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(hisi_hba->regs))
                goto err_out;

        if (hisi_sas_alloc(hisi_hba, shost)) {
                hisi_sas_free(hisi_hba);
                goto err_out;
        }

        return shost;
err_out:
        kfree(shost);
        dev_err(dev, "shost alloc failed\n");
        return NULL;
}

static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
        int i;

        for (i = 0; i < hisi_hba->n_phy; i++)
                memcpy(&hisi_hba->phy[i].dev_sas_addr,
                       hisi_hba->sas_addr,
                       SAS_ADDR_SIZE);
}

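/*
 * Common probe shared by the hw-specific drivers: allocate the host, wire
 * up the libsas phy/port arrays, register with SCSI and libsas, then init
 * the hardware and kick off the scan.
 */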
int hisi_sas_probe(struct platform_device *pdev,
                   const struct hisi_sas_hw *hw)
{
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        struct asd_sas_phy **arr_phy;
        struct asd_sas_port **arr_port;
        struct sas_ha_struct *sha;
        int rc, phy_nr, port_nr, i;

        shost = hisi_sas_shost_alloc(pdev, hw);
        if (!shost)
                return -ENOMEM;

        sha = SHOST_TO_SAS_HA(shost);
        hisi_hba = shost_priv(shost);
        platform_set_drvdata(pdev, sha);

        phy_nr = port_nr = hisi_hba->n_phy;

        arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
        arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_phy || !arr_port) {
                rc = -ENOMEM;
                goto err_out_ha;
        }

        sha->sas_phy = arr_phy;
        sha->sas_port = arr_port;
        sha->lldd_ha = hisi_hba;

        shost->transportt = hisi_sas_stt;
        shost->max_id = HISI_SAS_MAX_DEVICES;
        shost->max_lun = ~0;
        shost->max_channel = 1;
        shost->max_cmd_len = 16;
        shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
        shost->can_queue = hisi_hba->hw->max_command_entries;
        shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

        sha->sas_ha_name = DRV_NAME;
        sha->dev = &hisi_hba->pdev->dev;
        sha->lldd_module = THIS_MODULE;
        sha->sas_addr = &hisi_hba->sas_addr[0];
        sha->num_phys = hisi_hba->n_phy;
        sha->core.shost = hisi_hba->shost;

        for (i = 0; i < hisi_hba->n_phy; i++) {
                sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
        }

        hisi_sas_init_add(hisi_hba);

        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha;

        rc = sas_register_ha(sha);
        if (rc)
                goto err_out_register_ha;

        rc = hisi_hba->hw->hw_init(hisi_hba);
        if (rc)
                goto err_out_register_ha;

        scsi_scan_host(shost);

        return 0;

err_out_register_ha:
        scsi_remove_host(shost);
err_out_ha:
        hisi_sas_free(hisi_hba);
        kfree(shost);
        return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
        struct sas_ha_struct *sha = platform_get_drvdata(pdev);
        struct hisi_hba *hisi_hba = sha->lldd_ha;
        struct Scsi_Host *shost = sha->core.shost;

        scsi_remove_host(sha->core.shost);
        sas_unregister_ha(sha);
        sas_remove_host(sha->core.shost);

        hisi_sas_free(hisi_hba);
        kfree(shost);
        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
        pr_info("hisi_sas: driver version %s\n", DRV_VERSION);

        hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
        if (!hisi_sas_stt)
                return -ENOMEM;

        return 0;
}

static __exit void hisi_sas_exit(void)
{
        sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);