/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);

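/*
 * Classify an ATA command (taken from the H2D FIS) into the SATA
 * protocol type (FPDMA/PIO/DMA/non-data) expected by the HW command
 * header.
 */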
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
	default:
	{
		if (fis->command == ATA_CMD_SET_MAX) {
			switch (fis->features) {
			case ATA_SET_MAX_PASSWD:
			case ATA_SET_MAX_LOCK:
				return HISI_SAS_SATA_PROTOCOL_PIO;

			case ATA_SET_MAX_PASSWD_DMA:
			case ATA_SET_MAX_UNLOCK_DMA:
				return HISI_SAS_SATA_PROTOCOL_DMA;

			default:
				return HISI_SAS_SATA_PROTOCOL_NONDATA;
			}
		}
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

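/*
 * Copy the D2H FIS from the slot's status buffer into the libsas
 * ata_task_resp so that libata can see the device's ending FIS.
 */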
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

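/* Report the NCQ tag of a queued FPDMA read/write; returns 0 otherwise. */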
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

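/*
 * Slot index (tag) allocation is tracked in a bitmap; the helpers
 * below set, clear, and allocate tags, called under hisi_hba->lock.
 */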
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

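/*
 * Tear down a completed or aborted slot: unmap the sg list (non-ATA),
 * return the slot buffer to the DMA pool, and free the tag.
 */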
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}

/*
 * This function issues an abort TMF regardless of whether the task is
 * in the sdev or not. It then performs the task completion cleanup and
 * callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}

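/*
 * Prepare a task for delivery: validate the port and device, map the
 * sg list, allocate a tag and a delivery-queue entry, then fill the
 * command header according to the task protocol.
 */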
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
		*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
		int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; don't call
		 * task_done for SATA.
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d has no attached device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	if (is_tmf)
		slot->is_internal = true;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;
	++(*pass);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter,
				     task->num_scatter,
				     task->data_dir);
prep_out:
	return rc;
}

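/*
 * Common submission path: reject commands while the controller is
 * resetting, prep the task under the delivery-queue lock, and start
 * delivery if a slot was prepared.
 */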
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_dq *dq = sas_dev->dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&dq->lock, flags);
	rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return rc;
}

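/*
 * A phy has come up: report OOB completion to libsas, refresh the
 * transport phy's link rates, and hand over the received identify
 * frame (SAS) before signalling PORTE_BYTES_DMAED.
 */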
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

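/*
 * Find a free device slot in hisi_hba->devices[] and bind it to the
 * domain device; delivery queues are assigned round-robin by index.
 */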
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

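/*
 * Complete a task as aborted (SAS_TASK_COMPLETE/SAS_ABORTED_TASK) and
 * free its slot.
 */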
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* hisi_hba.lock must be held */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

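/* Timeouts are in seconds (multiplied by HZ when arming the timer). */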
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
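/*
 * Build and execute an internal TMF as a libsas slow task, retrying up
 * to TASK_RETRY times and interpreting the response/status codes.
 */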
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

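/* Build a device-reset FIS, asserting or de-asserting SRST. */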
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}

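/*
 * After a controller reset, rebind each registered device's port to a
 * phy that is currently up and re-program its ITCT entry.
 */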
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

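/*
 * Compare the phy state bitmaps from before and after the reset:
 * broadcast a rescan on expander ports whose phys are back up, and
 * tear down phys that were up before but are now down.
 */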
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}

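/*
 * Full controller reset: block new requests, stop the controller
 * timer, soft-reset the HW, complete all outstanding tasks as aborted,
 * then bring the phys back up and rescan the topology.
 */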
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init PHYs, then wait for them to come up and for all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return TMF_RESP_FUNC_COMPLETE;

	return TMF_RESP_FUNC_FAILED;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN; release it */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or it failed; reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

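/*
 * Allocate a slot and deliver an abort command to the chip; the
 * caller waits on the slow task for the abort to complete.
 */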
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for a single I/O command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If the hardware doesn't implement prep_abort, it either doesn't
	 * support internal abort or doesn't need one. In that case return
	 * TMF_RESP_FUNC_FAILED and let the other recovery steps proceed as
	 * if the internal abort had been executed and completed on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};

static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
	.shost_attrs		= host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_port_deformed	= hisi_sas_port_deformed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};

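/* Zero the queues and HW tables and reset the queue read/write pointers. */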
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

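/*
 * Allocate the DMA-coherent delivery/completion queues, ITCT, IOST,
 * breakpoint tables, and per-slot structures for the host.
 */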
1784 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1785 {
1786 struct device *dev = hisi_hba->dev;
1787 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1788
1789 spin_lock_init(&hisi_hba->lock);
1790 for (i = 0; i < hisi_hba->n_phy; i++) {
1791 hisi_sas_phy_init(hisi_hba, i);
1792 hisi_hba->port[i].port_attached = 0;
1793 hisi_hba->port[i].id = -1;
1794 }
1795
1796 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1797 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1798 hisi_hba->devices[i].device_id = i;
1799 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1800 }
1801
1802 for (i = 0; i < hisi_hba->queue_count; i++) {
1803 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1804 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1805
1806 /* Completion queue structure */
1807 cq->id = i;
1808 cq->hisi_hba = hisi_hba;
1809
1810 /* Delivery queue structure */
1811 spin_lock_init(&dq->lock);
1812 dq->id = i;
1813 dq->hisi_hba = hisi_hba;
1814
1815 /* Delivery queue */
1816 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1817 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1818 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1819 if (!hisi_hba->cmd_hdr[i])
1820 goto err_out;
1821
1822 /* Completion queue */
1823 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1824 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1825 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1826 if (!hisi_hba->complete_hdr[i])
1827 goto err_out;
1828 }
1829
1830 s = sizeof(struct hisi_sas_slot_buf_table);
1831 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1832 if (!hisi_hba->buffer_pool)
1833 goto err_out;
1834
1835 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1836 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1837 GFP_KERNEL);
1838 if (!hisi_hba->itct)
1839 goto err_out;
1840
1841 memset(hisi_hba->itct, 0, s);
1842
1843 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1844 sizeof(struct hisi_sas_slot),
1845 GFP_KERNEL);
1846 if (!hisi_hba->slot_info)
1847 goto err_out;
1848
1849 s = max_command_entries * sizeof(struct hisi_sas_iost);
1850 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1851 GFP_KERNEL);
1852 if (!hisi_hba->iost)
1853 goto err_out;
1854
1855 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1856 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1857 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1858 if (!hisi_hba->breakpoint)
1859 goto err_out;
1860
1861 hisi_hba->slot_index_count = max_command_entries;
1862 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1863 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1864 if (!hisi_hba->slot_index_tags)
1865 goto err_out;
1866
1867 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1868 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1869 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1870 if (!hisi_hba->initial_fis)
1871 goto err_out;
1872
1873 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1874 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1875 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1876 if (!hisi_hba->sata_breakpoint)
1877 goto err_out;
1878 hisi_sas_init_mem(hisi_hba);
1879
1880 hisi_sas_slot_index_init(hisi_hba);
1881
1882 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1883 if (!hisi_hba->wq) {
1884 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1885 goto err_out;
1886 }
1887
1888 return 0;
1889 err_out:
1890 return -ENOMEM;
1891 }
1892 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
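/*
 * Illustrative sketch (not part of the driver): hisi_sas_alloc()
 * funnels every allocation failure to a single err_out label and
 * returns -ENOMEM without unwinding, because its caller
 * (hisi_sas_shost_alloc() below) responds by calling hisi_sas_free(),
 * whose NULL checks skip anything that was never allocated. The bare
 * shape of that pattern, with hypothetical names:
 */
static int example_alloc_pair(struct device *dev, size_t s,
			      void **a, dma_addr_t *a_dma,
			      void **b, dma_addr_t *b_dma)
{
	*a = dma_alloc_coherent(dev, s, a_dma, GFP_KERNEL);
	if (!*a)
		goto err_out;

	*b = dma_alloc_coherent(dev, s, b_dma, GFP_KERNEL);
	if (!*b)
		goto err_out;

	return 0;
err_out:
	return -ENOMEM;	/* caller frees whatever did get allocated */
}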
1893
1894 void hisi_sas_free(struct hisi_hba *hisi_hba)
1895 {
1896 struct device *dev = hisi_hba->dev;
1897 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1898
1899 for (i = 0; i < hisi_hba->queue_count; i++) {
1900 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1901 if (hisi_hba->cmd_hdr[i])
1902 dma_free_coherent(dev, s,
1903 hisi_hba->cmd_hdr[i],
1904 hisi_hba->cmd_hdr_dma[i]);
1905
1906 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1907 if (hisi_hba->complete_hdr[i])
1908 dma_free_coherent(dev, s,
1909 hisi_hba->complete_hdr[i],
1910 hisi_hba->complete_hdr_dma[i]);
1911 }
1912
1913 dma_pool_destroy(hisi_hba->buffer_pool);
1914
1915 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1916 if (hisi_hba->itct)
1917 dma_free_coherent(dev, s,
1918 hisi_hba->itct, hisi_hba->itct_dma);
1919
1920 s = max_command_entries * sizeof(struct hisi_sas_iost);
1921 if (hisi_hba->iost)
1922 dma_free_coherent(dev, s,
1923 hisi_hba->iost, hisi_hba->iost_dma);
1924
1925 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1926 if (hisi_hba->breakpoint)
1927 dma_free_coherent(dev, s,
1928 hisi_hba->breakpoint,
1929 hisi_hba->breakpoint_dma);
1930
1932 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1933 if (hisi_hba->initial_fis)
1934 dma_free_coherent(dev, s,
1935 hisi_hba->initial_fis,
1936 hisi_hba->initial_fis_dma);
1937
1938 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1939 if (hisi_hba->sata_breakpoint)
1940 dma_free_coherent(dev, s,
1941 hisi_hba->sata_breakpoint,
1942 hisi_hba->sata_breakpoint_dma);
1943
1944 if (hisi_hba->wq)
1945 destroy_workqueue(hisi_hba->wq);
1946 }
1947 EXPORT_SYMBOL_GPL(hisi_sas_free);
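/*
 * Illustrative sketch (not part of the driver): the NULL checks above
 * make hisi_sas_free() safe to call on a partially initialised
 * hisi_hba; only destroy_workqueue() needs an explicit guard, since
 * dma_pool_destroy() already tolerates a NULL pool. Each guarded free
 * reduces to this hypothetical helper:
 */
static void example_free_one(struct device *dev, size_t s,
			     void *buf, dma_addr_t dma)
{
	if (buf)	/* may be NULL if hisi_sas_alloc() bailed early */
		dma_free_coherent(dev, s, buf, dma);
}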
1948
1949 void hisi_sas_rst_work_handler(struct work_struct *work)
1950 {
1951 struct hisi_hba *hisi_hba =
1952 container_of(work, struct hisi_hba, rst_work);
1953
1954 hisi_sas_controller_reset(hisi_hba);
1955 }
1956 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
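/*
 * Illustrative sketch (not part of this file): the fire-and-forget
 * reset path is driven by queueing rst_work (set up in
 * hisi_sas_shost_alloc() below) on the driver's single-threaded
 * workqueue, e.g. from a fatal-error interrupt handler:
 */
static void example_schedule_async_reset(struct hisi_hba *hisi_hba)
{
	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}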
1957
1958 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
1959 {
1960 struct hisi_sas_rst *rst =
1961 container_of(work, struct hisi_sas_rst, work);
1962
1963 if (!hisi_sas_controller_reset(rst->hisi_hba))
1964 rst->done = true;
1965 complete(rst->completion);
1966 }
1967 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
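/*
 * Illustrative sketch (not part of this file): a caller that must wait
 * for the reset can pair this handler with an on-stack work item and
 * completion, then test rst.done for the outcome. Roughly:
 */
static bool example_sync_reset(struct hisi_hba *hisi_hba)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct hisi_sas_rst rst = {
		.hisi_hba = hisi_hba,
		.completion = &done,
		.done = false,
	};

	INIT_WORK_ONSTACK(&rst.work, hisi_sas_sync_rst_work_handler);
	queue_work(hisi_hba->wq, &rst.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&rst.work);

	return rst.done;	/* true if hisi_sas_controller_reset() returned 0 */
}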
1968
1969 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
1970 {
1971 struct device *dev = hisi_hba->dev;
1972 struct platform_device *pdev = hisi_hba->platform_dev;
1973 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
1974 struct clk *refclk;
1975
1976 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
1977 SAS_ADDR_SIZE)) {
1978 dev_err(dev, "could not get property sas-addr\n");
1979 return -ENOENT;
1980 }
1981
1982 if (np) {
1983 /*
1984 * These properties are only required for platform device-based
1985 * controllers with DT firmware.
1986 */
1987 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1988 "hisilicon,sas-syscon");
1989 if (IS_ERR(hisi_hba->ctrl)) {
1990 dev_err(dev, "could not get syscon\n");
1991 return -ENOENT;
1992 }
1993
1994 if (device_property_read_u32(dev, "ctrl-reset-reg",
1995 &hisi_hba->ctrl_reset_reg)) {
1996 dev_err(dev,
1997 "could not get property ctrl-reset-reg\n");
1998 return -ENOENT;
1999 }
2000
2001 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2002 &hisi_hba->ctrl_reset_sts_reg)) {
2003 dev_err(dev,
2004 "could not get property ctrl-reset-sts-reg\n");
2005 return -ENOENT;
2006 }
2007
2008 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2009 &hisi_hba->ctrl_clock_ena_reg)) {
2010 dev_err(dev,
2011 "could not get property ctrl-clock-ena-reg\n");
2012 return -ENOENT;
2013 }
2014 }
2015
2016 refclk = devm_clk_get(dev, NULL);
2017 if (IS_ERR(refclk))
2018 dev_dbg(dev, "no ref clk property\n");
2019 else
2020 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2021
2022 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2023 dev_err(dev, "could not get property phy-count\n");
2024 return -ENOENT;
2025 }
2026
2027 if (device_property_read_u32(dev, "queue-count",
2028 &hisi_hba->queue_count)) {
2029 dev_err(dev, "could not get property queue-count\n");
2030 return -ENOENT;
2031 }
2032
2033 return 0;
2034 }
2035 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
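/*
 * Illustrative sketch (not part of the driver): sas-addr, phy-count
 * and queue-count are mandatory (the probe fails with -ENOENT), while
 * the reference clock is optional. A hypothetical optional u32
 * property would follow the refclk pattern and fall back to a default
 * instead of erroring out:
 */
static u32 example_optional_u32(struct device *dev, const char *prop, u32 def)
{
	u32 val;

	if (device_property_read_u32(dev, prop, &val))
		return def;	/* property absent: keep the default */
	return val;
}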
2036
2037 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2038 const struct hisi_sas_hw *hw)
2039 {
2040 struct resource *res;
2041 struct Scsi_Host *shost;
2042 struct hisi_hba *hisi_hba;
2043 struct device *dev = &pdev->dev;
2044
2045 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
2046 if (!shost) {
2047 dev_err(dev, "scsi host alloc failed\n");
2048 return NULL;
2049 }
2050 hisi_hba = shost_priv(shost);
2051
2052 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2053 hisi_hba->hw = hw;
2054 hisi_hba->dev = dev;
2055 hisi_hba->platform_dev = pdev;
2056 hisi_hba->shost = shost;
2057 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2058
2059 timer_setup(&hisi_hba->timer, NULL, 0);
2060
2061 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2062 goto err_out;
2063
2064 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2065 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2066 dev_err(dev, "No usable DMA addressing method\n");
2067 goto err_out;
2068 }
2069
2070 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2071 hisi_hba->regs = devm_ioremap_resource(dev, res);
2072 if (IS_ERR(hisi_hba->regs))
2073 goto err_out;
2074
2075 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2076 if (res) {
2077 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2078 if (IS_ERR(hisi_hba->sgpio_regs))
2079 goto err_out;
2080 }
2081
2082 if (hisi_sas_alloc(hisi_hba, shost)) {
2083 hisi_sas_free(hisi_hba);
2084 goto err_out;
2085 }
2086
2087 return shost;
2088 err_out:
2089 scsi_host_put(shost);
2090 dev_err(dev, "shost alloc failed\n");
2091 return NULL;
2092 }
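/*
 * Illustrative sketch (not part of the driver): the DMA mask setup
 * above prefers 64-bit addressing and only then falls back to 32-bit;
 * if both calls fail, the device has no usable addressing method. In
 * isolation the fallback looks like:
 */
static int example_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit DMA is available */

	/* fall back to 32-bit; non-zero here means no usable method */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}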
2093
2094 void hisi_sas_init_add(struct hisi_hba *hisi_hba)
2095 {
2096 int i;
2097
2098 for (i = 0; i < hisi_hba->n_phy; i++)
2099 memcpy(&hisi_hba->phy[i].dev_sas_addr,
2100 hisi_hba->sas_addr,
2101 SAS_ADDR_SIZE);
2102 }
2103 EXPORT_SYMBOL_GPL(hisi_sas_init_add);
2104
2105 int hisi_sas_probe(struct platform_device *pdev,
2106 const struct hisi_sas_hw *hw)
2107 {
2108 struct Scsi_Host *shost;
2109 struct hisi_hba *hisi_hba;
2110 struct device *dev = &pdev->dev;
2111 struct asd_sas_phy **arr_phy;
2112 struct asd_sas_port **arr_port;
2113 struct sas_ha_struct *sha;
2114 int rc, phy_nr, port_nr, i;
2115
2116 shost = hisi_sas_shost_alloc(pdev, hw);
2117 if (!shost)
2118 return -ENOMEM;
2119
2120 sha = SHOST_TO_SAS_HA(shost);
2121 hisi_hba = shost_priv(shost);
2122 platform_set_drvdata(pdev, sha);
2123
2124 phy_nr = port_nr = hisi_hba->n_phy;
2125
2126 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2127 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2128 if (!arr_phy || !arr_port) {
2129 rc = -ENOMEM;
2130 goto err_out_ha;
2131 }
2132
2133 sha->sas_phy = arr_phy;
2134 sha->sas_port = arr_port;
2135 sha->lldd_ha = hisi_hba;
2136
2137 shost->transportt = hisi_sas_stt;
2138 shost->max_id = HISI_SAS_MAX_DEVICES;
2139 shost->max_lun = ~0;
2140 shost->max_channel = 1;
2141 shost->max_cmd_len = 16;
2142 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2143 shost->can_queue = hisi_hba->hw->max_command_entries;
2144 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2145
2146 sha->sas_ha_name = DRV_NAME;
2147 sha->dev = hisi_hba->dev;
2148 sha->lldd_module = THIS_MODULE;
2149 sha->sas_addr = &hisi_hba->sas_addr[0];
2150 sha->num_phys = hisi_hba->n_phy;
2151 sha->core.shost = hisi_hba->shost;
2152
2153 for (i = 0; i < hisi_hba->n_phy; i++) {
2154 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2155 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2156 }
2157
2158 hisi_sas_init_add(hisi_hba);
2159
2160 rc = scsi_add_host(shost, &pdev->dev);
2161 if (rc)
2162 goto err_out_ha;
2163
2164 rc = sas_register_ha(sha);
2165 if (rc)
2166 goto err_out_register_ha;
2167
2168 rc = hisi_hba->hw->hw_init(hisi_hba);
2169 if (rc)
2170 goto err_out_register_ha;
2171
2172 scsi_scan_host(shost);
2173
2174 return 0;
2175
2176 err_out_register_ha:
2177 scsi_remove_host(shost);
2178 err_out_ha:
2179 hisi_sas_free(hisi_hba);
2180 scsi_host_put(shost);
2181 return rc;
2182 }
2183 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2184
2185 int hisi_sas_remove(struct platform_device *pdev)
2186 {
2187 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2188 struct hisi_hba *hisi_hba = sha->lldd_ha;
2189 struct Scsi_Host *shost = sha->core.shost;
2190
2191 if (timer_pending(&hisi_hba->timer))
2192 del_timer(&hisi_hba->timer);
2193
2194 sas_unregister_ha(sha);
2195 sas_remove_host(shost);
2196
2197 hisi_sas_free(hisi_hba);
2198 scsi_host_put(shost);
2199 return 0;
2200 }
2201 EXPORT_SYMBOL_GPL(hisi_sas_remove);
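/*
 * Illustrative sketch (not part of this file): hisi_sas_probe() and
 * hisi_sas_remove() are exported so the hw-version modules (e.g.
 * hisi_sas_v1_hw.c, hisi_sas_v2_hw.c) can plug in their own
 * hisi_sas_hw operations table; a hypothetical hw module would wire
 * them up roughly as:
 */
static const struct hisi_sas_hw example_hw_ops;	/* hw callbacks elided */

static int example_hw_probe(struct platform_device *pdev)
{
	return hisi_sas_probe(pdev, &example_hw_ops);
}

static struct platform_driver example_hw_driver = {
	.probe = example_hw_probe,
	.remove = hisi_sas_remove,
	.driver = {
		.name = "example-hisi-sas-hw",	/* hypothetical */
	},
};
module_platform_driver(example_hw_driver);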
2202
2203 static __init int hisi_sas_init(void)
2204 {
2205 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2206 if (!hisi_sas_stt)
2207 return -ENOMEM;
2208
2209 return 0;
2210 }
2211
2212 static __exit void hisi_sas_exit(void)
2213 {
2214 sas_release_transport(hisi_sas_stt);
2215 }
2216
2217 module_init(hisi_sas_init);
2218 module_exit(hisi_sas_exit);
2219
2220 MODULE_LICENSE("GPL");
2221 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2222 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2223 MODULE_ALIAS("platform:" DRV_NAME);