scsi: hisi_sas: Fix a timeout race of driver internal and SMP IO
drivers/scsi/hisi_sas/hisi_sas_main.c
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#include "../libsas/sas_internal.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

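/*
 * Classify a host-to-device FIS into the SATA frame protocol (FPDMA,
 * PIO, DMA or non-data) that the command header needs to carry.
 */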
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

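/*
 * Copy the D2H FIS from the slot's status buffer into the libata
 * response so libsas/libata can decode the command result.
 */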
void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
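/*
 * Each supported rate occupies two bits, starting from 1.5 Gbps at
 * bit 0. For example, max == SAS_LINK_RATE_6_0_GBPS yields 0x15
 * (bits 0, 2 and 4 set, enabling 1.5, 3.0 and 6.0 Gbps).
 */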
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u16 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	unsigned long flags;

	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

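/*
 * IPTT allocation: commands carrying a scsi_cmnd reuse the block
 * layer request tag directly; driver-internal commands (TMFs and
 * aborts, which have no scsi_cmnd) draw from a reserved region at
 * the top of the tag space, tracked in the slot_index_tags bitmap.
 */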
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;
	unsigned long flags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				hisi_hba->hw->max_command_entries -
				HISI_SAS_RESERVED_IPTT_CNT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}


	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req, int n_elem_resp)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req, int *n_elem_resp)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						  1, DMA_FROM_DEVICE);
			if (!*n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req, *n_elem_resp);
prep_out:
	return rc;
}

383
384 static int hisi_sas_task_prep(struct sas_task *task,
385 struct hisi_sas_dq **dq_pointer,
386 bool is_tmf, struct hisi_sas_tmf_task *tmf,
387 int *pass)
388 {
389 struct domain_device *device = task->dev;
390 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
391 struct hisi_sas_device *sas_dev = device->lldd_dev;
392 struct hisi_sas_port *port;
393 struct hisi_sas_slot *slot;
394 struct hisi_sas_cmd_hdr *cmd_hdr_base;
395 struct asd_sas_port *sas_port = device->port;
396 struct device *dev = hisi_hba->dev;
397 int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
398 int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
399 struct hisi_sas_dq *dq;
400 unsigned long flags;
401 int wr_q_index;
402
403 if (DEV_IS_GONE(sas_dev)) {
404 if (sas_dev)
405 dev_info(dev, "task prep: device %d not ready\n",
406 sas_dev->device_id);
407 else
408 dev_info(dev, "task prep: device %016llx not ready\n",
409 SAS_ADDR(device->sas_addr));
410
411 return -ECOMM;
412 }
413
414 *dq_pointer = dq = sas_dev->dq;
415
416 port = to_hisi_sas_port(sas_port);
417 if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attached to device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req, &n_elem_resp);
	if (rc < 0)
		goto prep_out;

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&dq->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req, n_elem_resp);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; do not call
		 * task_done() for SATA devices.
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (in_softirq())
			return -EINVAL;

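		/*
		 * A controller reset is in progress and holds the
		 * semaphore; down()/up() just waits for it to finish.
		 * down() may sleep, hence the softirq bail-out above.
		 */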
		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing to do */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
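	/*
	 * Circular scan of the device table starting just after the
	 * last allocated entry: the increment sits at the bottom of
	 * the loop body and "i %= HISI_SAS_MAX_DEVICES" wraps the index.
	 */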
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

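/*
 * Bring a newly found (or just reset) device into a clean state:
 * clear the task set for SSP end devices, or softreset SATA disks,
 * retrying the softreset up to HISI_SAS_SRST_ATA_DISK_CNT times.
 */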
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				      struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_hba->hw->phy_start(hisi_hba, phy_no);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
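/* Timeouts are in seconds (scaled by HZ below); TASK_RETRY counts attempts. */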
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

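/* Build a DEVICE RESET host-to-device FIS with SRST set or cleared. */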
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

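/*
 * After a controller reset the PHY-to-port mapping may have changed:
 * walk the device table and refresh each port id from a PHY that came
 * back up, re-programming the ITCT and, for directly attached
 * devices, the negotiated linkrate.
 */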
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

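/*
 * Compare the PHY state before and after reset: PHYs still up on an
 * expander port trigger a broadcast event so libsas revalidates the
 * domain; PHYs that were up but dropped are reported down.
 */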
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

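/*
 * For controllers that reject STP links after a reset: abort any
 * outstanding IO per device, then send a per-PHY softreset to each
 * SATA device found behind an expander port.
 */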
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

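/*
 * Reset the local PHY: a link reset (reset_type 0) for SATA/STP so
 * the disk is not hard-reset, a hard reset otherwise. For a PHY on
 * the HBA itself, wait up to 2s for the reset interrupt and report
 * the PHY down on timeout; for an expander PHY, just sleep while the
 * remote end resets.
 */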
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else
		msleep(2000);

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for a single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant for single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If the prep_abort interface is not implemented, this HW either
	 * does not support internal abort or does not need one. Return
	 * TMF_RESP_FUNC_FAILED and let the remaining steps proceed as if
	 * the internal abort had been executed and completed on the CQ.
	 */
1887 if (!hisi_hba->hw->prep_abort)
1888 return TMF_RESP_FUNC_FAILED;
1889
1890 task = sas_alloc_slow_task(GFP_KERNEL);
1891 if (!task)
1892 return -ENOMEM;
1893
1894 task->dev = device;
1895 task->task_proto = device->tproto;
1896 task->task_done = hisi_sas_task_done;
1897 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1898 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1899 add_timer(&task->slow_task->timer);
1900
1901 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1902 task, abort_flag, tag);
1903 if (res) {
1904 del_timer(&task->slow_task->timer);
1905 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1906 res);
1907 goto exit;
1908 }
1909 wait_for_completion(&task->slow_task->completion);
1910 res = TMF_RESP_FUNC_FAILED;
1911
1912 /* Internal abort timed out */
1913 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1914 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1915 struct hisi_sas_slot *slot = task->lldd_task;
1916
1917 if (slot) {
1918 struct hisi_sas_cq *cq =
1919 &hisi_hba->cq[slot->dlvry_queue];
1920 /*
1921 * flush the tasklet so the IO completion path is not
1922 * still using the task when it gets freed
1923 */
1924 tasklet_kill(&cq->tasklet);
1925 slot->task = NULL;
1926 }
1927 dev_err(dev, "internal task abort: timeout and not done.\n");
1928 res = -EIO;
1929 goto exit;
1930 } else
1931 dev_err(dev, "internal task abort: timeout.\n");
1932 }
1933
1934 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1935 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1936 res = TMF_RESP_FUNC_COMPLETE;
1937 goto exit;
1938 }
1939
1940 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1941 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1942 res = TMF_RESP_FUNC_SUCC;
1943 goto exit;
1944 }
1945
1946 exit:
1947 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1948 "resp: 0x%x sts 0x%x\n",
1949 SAS_ADDR(device->sas_addr),
1950 task,
1951 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1952 task->task_status.stat);
1953 sas_free_task(task);
1954
1955 return res;
1956 }
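/*
 * Illustrative caller sketch (not taken verbatim from this file): one
 * plausible error-handling pattern is to abort a single IO by tag and
 * escalate to a device-wide abort only if that fails:
 *
 *	rc = hisi_sas_internal_task_abort(hisi_hba, device,
 *					  HISI_SAS_INT_ABT_CMD, tag);
 *	if (rc != TMF_RESP_FUNC_COMPLETE)
 *		rc = hisi_sas_internal_task_abort(hisi_hba, device,
 *						  HISI_SAS_INT_ABT_DEV, 0);
 */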
1957
1958 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1959 {
1960 hisi_sas_port_notify_formed(sas_phy);
1961 }
1962
1963 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1964 {
1965 }
1966
1967 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1968 u8 reg_index, u8 reg_count, u8 *write_data)
1969 {
1970 struct hisi_hba *hisi_hba = sha->lldd_ha;
1971
1972 if (!hisi_hba->hw->write_gpio)
1973 return -EOPNOTSUPP;
1974
1975 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1976 reg_index, reg_count, write_data);
1977 }
1978
1979 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1980 {
1981 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1982 struct sas_phy *sphy = sas_phy->phy;
1983 struct sas_phy_data *d = sphy->hostdata;
1984
1985 phy->phy_attached = 0;
1986 phy->phy_type = 0;
1987 phy->port = NULL;
1988
1989 if (d->enable)
1990 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
1991 else
1992 sphy->negotiated_linkrate = SAS_PHY_DISABLED;
1993 }
1994
1995 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1996 {
1997 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1998 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1999 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
2000 struct device *dev = hisi_hba->dev;
2001
2002 if (rdy) {
2003 /* Phy down but ready */
2004 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
2005 hisi_sas_port_notify_formed(sas_phy);
2006 } else {
2007 struct hisi_sas_port *port = phy->port;
2008
2009 if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
2010 phy->in_reset) {
2011 dev_info(dev, "ignore flutter phy%d down\n", phy_no);
2012 return;
2013 }
2014 /* Phy down and not ready */
2015 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
2016 sas_phy_disconnected(sas_phy);
2017
2018 if (port) {
2019 if (phy->phy_type & PORT_TYPE_SAS) {
2020 int port_id = port->id;
2021
2022 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
2023 port_id))
2024 port->port_attached = 0;
2025 } else if (phy->phy_type & PORT_TYPE_SATA)
2026 port->port_attached = 0;
2027 }
2028 hisi_sas_phy_disconnected(phy);
2029 }
2030 }
2031 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
2032
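/*
 * Kill (and wait out) every completion-queue tasklet; used on the
 * controller reset paths so that no CQ processing can run while the
 * queues are being torn down and re-initialised.
 */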
2033 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
2034 {
2035 int i;
2036
2037 for (i = 0; i < hisi_hba->queue_count; i++) {
2038 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2039
2040 tasklet_kill(&cq->tasklet);
2041 }
2042 }
2043 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
2044
2045 struct scsi_transport_template *hisi_sas_stt;
2046 EXPORT_SYMBOL_GPL(hisi_sas_stt);
2047
2048 static struct sas_domain_function_template hisi_sas_transport_ops = {
2049 .lldd_dev_found = hisi_sas_dev_found,
2050 .lldd_dev_gone = hisi_sas_dev_gone,
2051 .lldd_execute_task = hisi_sas_queue_command,
2052 .lldd_control_phy = hisi_sas_control_phy,
2053 .lldd_abort_task = hisi_sas_abort_task,
2054 .lldd_abort_task_set = hisi_sas_abort_task_set,
2055 .lldd_clear_aca = hisi_sas_clear_aca,
2056 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
2057 .lldd_lu_reset = hisi_sas_lu_reset,
2058 .lldd_query_task = hisi_sas_query_task,
2059 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
2060 .lldd_port_formed = hisi_sas_port_formed,
2061 .lldd_port_deformed = hisi_sas_port_deformed,
2062 .lldd_write_gpio = hisi_sas_write_gpio,
2063 };
2064
2065 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2066 {
2067 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
2068
2069 for (i = 0; i < hisi_hba->queue_count; i++) {
2070 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2071 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2072
2073 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2074 memset(hisi_hba->cmd_hdr[i], 0, s);
2075 dq->wr_point = 0;
2076
2077 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2078 memset(hisi_hba->complete_hdr[i], 0, s);
2079 cq->rd_point = 0;
2080 }
2081
2082 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2083 memset(hisi_hba->initial_fis, 0, s);
2084
2085 s = max_command_entries * sizeof(struct hisi_sas_iost);
2086 memset(hisi_hba->iost, 0, s);
2087
2088 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2089 memset(hisi_hba->breakpoint, 0, s);
2090
2091 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2092 memset(hisi_hba->sata_breakpoint, 0, s);
2093 }
2094 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2095
2096 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
2097 {
2098 struct device *dev = hisi_hba->dev;
2099 int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
2100 int max_command_entries_ru, sz_slot_buf_ru;
2101 int blk_cnt, slots_per_blk;
2102
2103 sema_init(&hisi_hba->sem, 1);
2104 spin_lock_init(&hisi_hba->lock);
2105 for (i = 0; i < hisi_hba->n_phy; i++) {
2106 hisi_sas_phy_init(hisi_hba, i);
2107 hisi_hba->port[i].port_attached = 0;
2108 hisi_hba->port[i].id = -1;
2109 }
2110
2111 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2112 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2113 hisi_hba->devices[i].device_id = i;
2114 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
2115 }
2116
2117 for (i = 0; i < hisi_hba->queue_count; i++) {
2118 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2119 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2120
2121 /* Completion queue structure */
2122 cq->id = i;
2123 cq->hisi_hba = hisi_hba;
2124
2125 /* Delivery queue structure */
2126 spin_lock_init(&dq->lock);
2127 INIT_LIST_HEAD(&dq->list);
2128 dq->id = i;
2129 dq->hisi_hba = hisi_hba;
2130
2131 /* Delivery queue */
2132 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2133 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2134 &hisi_hba->cmd_hdr_dma[i],
2135 GFP_KERNEL);
2136 if (!hisi_hba->cmd_hdr[i])
2137 goto err_out;
2138
2139 /* Completion queue */
2140 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2141 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2142 &hisi_hba->complete_hdr_dma[i],
2143 GFP_KERNEL);
2144 if (!hisi_hba->complete_hdr[i])
2145 goto err_out;
2146 }
2147
2148 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2149 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2150 GFP_KERNEL);
2151 if (!hisi_hba->itct)
2152 goto err_out;
2153 memset(hisi_hba->itct, 0, s);
2154
2155 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2156 sizeof(struct hisi_sas_slot),
2157 GFP_KERNEL);
2158 if (!hisi_hba->slot_info)
2159 goto err_out;
2160
2161 /* round both values up so lcm() below yields a reasonably small block size */
2162 max_command_entries_ru = roundup(max_command_entries, 64);
2163 sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
2164 s = lcm(max_command_entries_ru, sz_slot_buf_ru);
2165 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2166 slots_per_blk = s / sz_slot_buf_ru;
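/*
 * Worked example with hypothetical sizes: for 4096 entries and a slot
 * buf table that rounds up to 576 bytes, lcm(4096, 576) = 36864, so
 * each DMA block is 36864 bytes holding 64 whole slot buffers, and
 * blk_cnt = (4096 * 576) / 36864 = 64 blocks covers all 4096 slots.
 * By construction no slot buffer straddles a block boundary.
 */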
2167 for (i = 0; i < blk_cnt; i++) {
2168 struct hisi_sas_slot_buf_table *buf;
2169 dma_addr_t buf_dma;
2170 int slot_index = i * slots_per_blk;
2171
2172 buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
2173 if (!buf)
2174 goto err_out;
2175 memset(buf, 0, s);
2176
2177 for (j = 0; j < slots_per_blk; j++, slot_index++) {
2178 struct hisi_sas_slot *slot;
2179
2180 slot = &hisi_hba->slot_info[slot_index];
2181 slot->buf = buf;
2182 slot->buf_dma = buf_dma;
2183 slot->idx = slot_index;
2184
2185 buf++;
2186 buf_dma += sizeof(*buf);
2187 }
2188 }
2189
2190 s = max_command_entries * sizeof(struct hisi_sas_iost);
2191 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
2192 GFP_KERNEL);
2193 if (!hisi_hba->iost)
2194 goto err_out;
2195
2196 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2197 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
2198 &hisi_hba->breakpoint_dma,
2199 GFP_KERNEL);
2200 if (!hisi_hba->breakpoint)
2201 goto err_out;
2202
2203 hisi_hba->slot_index_count = max_command_entries;
2204 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
2205 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
2206 if (!hisi_hba->slot_index_tags)
2207 goto err_out;
2208
2209 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2210 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
2211 &hisi_hba->initial_fis_dma,
2212 GFP_KERNEL);
2213 if (!hisi_hba->initial_fis)
2214 goto err_out;
2215
2216 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2217 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
2218 &hisi_hba->sata_breakpoint_dma,
2219 GFP_KERNEL);
2220 if (!hisi_hba->sata_breakpoint)
2221 goto err_out;
2222 hisi_sas_init_mem(hisi_hba);
2223
2224 hisi_sas_slot_index_init(hisi_hba);
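/*
 * The top HISI_SAS_RESERVED_IPTT_CNT tags are set aside for internal
 * IO (abort/TMF), so ordinary commands allocate from
 * [0, last_slot_index) and internal IO from the reserved range above
 * it; this keeps internal aborts from starving when the queue is full.
 */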
2225 hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
2226 HISI_SAS_RESERVED_IPTT_CNT;
2227
2228 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2229 if (!hisi_hba->wq) {
2230 dev_err(dev, "sas_alloc: failed to create workqueue\n");
2231 goto err_out;
2232 }
2233
2234 return 0;
2235 err_out:
2236 return -ENOMEM;
2237 }
2238 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2239
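/*
 * Nearly everything in hisi_sas_alloc() is devm/dmam managed and is
 * released automatically by the driver core; only the workqueue needs
 * explicit teardown here.
 */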
2240 void hisi_sas_free(struct hisi_hba *hisi_hba)
2241 {
2242 if (hisi_hba->wq)
2243 destroy_workqueue(hisi_hba->wq);
2244 }
2245 EXPORT_SYMBOL_GPL(hisi_sas_free);
2246
2247 void hisi_sas_rst_work_handler(struct work_struct *work)
2248 {
2249 struct hisi_hba *hisi_hba =
2250 container_of(work, struct hisi_hba, rst_work);
2251
2252 hisi_sas_controller_reset(hisi_hba);
2253 }
2254 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2255
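/*
 * Synchronous reset variant: the caller embeds a completion in struct
 * hisi_sas_rst and waits on it; rst->done reports whether the
 * controller reset actually succeeded.
 */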
2256 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2257 {
2258 struct hisi_sas_rst *rst =
2259 container_of(work, struct hisi_sas_rst, work);
2260
2261 if (!hisi_sas_controller_reset(rst->hisi_hba))
2262 rst->done = true;
2263 complete(rst->completion);
2264 }
2265 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2266
2267 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2268 {
2269 struct device *dev = hisi_hba->dev;
2270 struct platform_device *pdev = hisi_hba->platform_dev;
2271 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2272 struct clk *refclk;
2273
2274 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2275 SAS_ADDR_SIZE)) {
2276 dev_err(dev, "could not get property sas-addr\n");
2277 return -ENOENT;
2278 }
2279
2280 if (np) {
2281 /*
2282 * These properties are only required for platform device-based
2283 * controllers with DT firmware.
2284 */
2285 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2286 "hisilicon,sas-syscon");
2287 if (IS_ERR(hisi_hba->ctrl)) {
2288 dev_err(dev, "could not get syscon\n");
2289 return -ENOENT;
2290 }
2291
2292 if (device_property_read_u32(dev, "ctrl-reset-reg",
2293 &hisi_hba->ctrl_reset_reg)) {
2294 dev_err(dev,
2295 "could not get property ctrl-reset-reg\n");
2296 return -ENOENT;
2297 }
2298
2299 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2300 &hisi_hba->ctrl_reset_sts_reg)) {
2301 dev_err(dev,
2302 "could not get property ctrl-reset-sts-reg\n");
2303 return -ENOENT;
2304 }
2305
2306 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2307 &hisi_hba->ctrl_clock_ena_reg)) {
2308 dev_err(dev,
2309 "could not get property ctrl-clock-ena-reg\n");
2310 return -ENOENT;
2311 }
2312 }
2313
2314 refclk = devm_clk_get(dev, NULL);
2315 if (IS_ERR(refclk))
2316 dev_dbg(dev, "no ref clk property\n");
2317 else
2318 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2319
2320 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2321 dev_err(dev, "could not get property phy-count\n");
2322 return -ENOENT;
2323 }
2324
2325 if (device_property_read_u32(dev, "queue-count",
2326 &hisi_hba->queue_count)) {
2327 dev_err(dev, "could not get property queue-count\n");
2328 return -ENOENT;
2329 }
2330
2331 return 0;
2332 }
2333 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
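/*
 * Illustrative DT fragment (all values hypothetical) matching the
 * properties read above:
 *
 *	sas@c1000000 {
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&pctrl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <8>;
 *		queue-count = <16>;
 *	};
 */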
2334
2335 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2336 const struct hisi_sas_hw *hw)
2337 {
2338 struct resource *res;
2339 struct Scsi_Host *shost;
2340 struct hisi_hba *hisi_hba;
2341 struct device *dev = &pdev->dev;
2342
2343 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
2344 if (!shost) {
2345 dev_err(dev, "scsi host alloc failed\n");
2346 return NULL;
2347 }
2348 hisi_hba = shost_priv(shost);
2349
2350 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2351 hisi_hba->hw = hw;
2352 hisi_hba->dev = dev;
2353 hisi_hba->platform_dev = pdev;
2354 hisi_hba->shost = shost;
2355 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2356
2357 timer_setup(&hisi_hba->timer, NULL, 0);
2358
2359 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2360 goto err_out;
2361
2362 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2363 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2364 dev_err(dev, "No usable DMA addressing method\n");
2365 goto err_out;
2366 }
2367
2368 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2369 hisi_hba->regs = devm_ioremap_resource(dev, res);
2370 if (IS_ERR(hisi_hba->regs))
2371 goto err_out;
2372
2373 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2374 if (res) {
2375 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2376 if (IS_ERR(hisi_hba->sgpio_regs))
2377 goto err_out;
2378 }
2379
2380 if (hisi_sas_alloc(hisi_hba, shost)) {
2381 hisi_sas_free(hisi_hba);
2382 goto err_out;
2383 }
2384
2385 return shost;
2386 err_out:
2387 scsi_host_put(shost);
2388 dev_err(dev, "shost alloc failed\n");
2389 return NULL;
2390 }
2391
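/*
 * Common probe path for the platform-device based hw variants:
 * allocate and set up the shost, wire up the libsas phy/port arrays,
 * register with the SCSI and SAS cores, then initialise the hardware
 * and scan.
 */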
2392 int hisi_sas_probe(struct platform_device *pdev,
2393 const struct hisi_sas_hw *hw)
2394 {
2395 struct Scsi_Host *shost;
2396 struct hisi_hba *hisi_hba;
2397 struct device *dev = &pdev->dev;
2398 struct asd_sas_phy **arr_phy;
2399 struct asd_sas_port **arr_port;
2400 struct sas_ha_struct *sha;
2401 int rc, phy_nr, port_nr, i;
2402
2403 shost = hisi_sas_shost_alloc(pdev, hw);
2404 if (!shost)
2405 return -ENOMEM;
2406
2407 sha = SHOST_TO_SAS_HA(shost);
2408 hisi_hba = shost_priv(shost);
2409 platform_set_drvdata(pdev, sha);
2410
2411 phy_nr = port_nr = hisi_hba->n_phy;
2412
2413 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2414 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2415 if (!arr_phy || !arr_port) {
2416 rc = -ENOMEM;
2417 goto err_out_ha;
2418 }
2419
2420 sha->sas_phy = arr_phy;
2421 sha->sas_port = arr_port;
2422 sha->lldd_ha = hisi_hba;
2423
2424 shost->transportt = hisi_sas_stt;
2425 shost->max_id = HISI_SAS_MAX_DEVICES;
2426 shost->max_lun = ~0;
2427 shost->max_channel = 1;
2428 shost->max_cmd_len = 16;
2429 if (hisi_hba->hw->slot_index_alloc) {
2430 shost->can_queue = hisi_hba->hw->max_command_entries;
2431 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2432 } else {
2433 shost->can_queue = hisi_hba->hw->max_command_entries -
2434 HISI_SAS_RESERVED_IPTT_CNT;
2435 shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
2436 HISI_SAS_RESERVED_IPTT_CNT;
2437 }
2438
2439 sha->sas_ha_name = DRV_NAME;
2440 sha->dev = hisi_hba->dev;
2441 sha->lldd_module = THIS_MODULE;
2442 sha->sas_addr = &hisi_hba->sas_addr[0];
2443 sha->num_phys = hisi_hba->n_phy;
2444 sha->core.shost = hisi_hba->shost;
2445
2446 for (i = 0; i < hisi_hba->n_phy; i++) {
2447 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2448 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2449 }
2450
2451 rc = scsi_add_host(shost, &pdev->dev);
2452 if (rc)
2453 goto err_out_ha;
2454
2455 rc = sas_register_ha(sha);
2456 if (rc)
2457 goto err_out_register_ha;
2458
2459 rc = hisi_hba->hw->hw_init(hisi_hba);
2460 if (rc)
2461 goto err_out_register_ha;
2462
2463 scsi_scan_host(shost);
2464
2465 return 0;
2466
2467 err_out_register_ha:
2468 scsi_remove_host(shost);
2469 err_out_ha:
2470 hisi_sas_free(hisi_hba);
2471 scsi_host_put(shost);
2472 return rc;
2473 }
2474 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2475
2476 int hisi_sas_remove(struct platform_device *pdev)
2477 {
2478 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2479 struct hisi_hba *hisi_hba = sha->lldd_ha;
2480 struct Scsi_Host *shost = sha->core.shost;
2481
2482 if (timer_pending(&hisi_hba->timer))
2483 del_timer(&hisi_hba->timer);
2484
2485 sas_unregister_ha(sha);
2486 sas_remove_host(sha->core.shost);
2487
2488 hisi_sas_free(hisi_hba);
2489 scsi_host_put(shost);
2490 return 0;
2491 }
2492 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2493
2494 static __init int hisi_sas_init(void)
2495 {
2496 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2497 if (!hisi_sas_stt)
2498 return -ENOMEM;
2499
2500 return 0;
2501 }
2502
2503 static __exit void hisi_sas_exit(void)
2504 {
2505 sas_release_transport(hisi_sas_stt);
2506 }
2507
2508 module_init(hisi_sas_init);
2509 module_exit(hisi_sas_exit);
2510
2511 MODULE_LICENSE("GPL");
2512 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2513 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2514 MODULE_ALIAS("platform:" DRV_NAME);