/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}
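
/* Allocate a free command slot index by scanning the tag bitmap. */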
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct device *dev = &hisi_hba->pdev->dev;

	if (!slot->task)
		return;

	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(dev, task->scatter, slot->n_elem,
				     task->data_dir);

	if (slot->command_table)
		dma_pool_free(hisi_hba->command_table_pool,
			      slot->command_table, slot->command_table_dma);

	if (slot->status_buffer)
		dma_pool_free(hisi_hba->status_buffer_pool,
			      slot->status_buffer, slot->status_buffer_dma);

	if (slot->sge_page)
		dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
			      slot->sge_page_dma);

	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	memset(slot, 0, sizeof(*slot));
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}

/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct scsi_lun lun;
	struct device *dev = &hisi_hba->pdev->dev;
	int tag = abort_slot->idx;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	if (task->task_done)
		task->task_done(task);
	if (sas_dev && sas_dev->running_req)
		sas_dev->running_req--;
}
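
/*
 * Build a command for delivery to the hardware: validate that the device
 * and port are usable, DMA-map the scatterlist for non-ATA tasks, allocate
 * a slot and delivery-queue entry, and fill the protocol-specific command
 * header. Called under hisi_hba->lock; *pass is bumped so the caller knows
 * a command was queued and delivery should be started.
 */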
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct device *dev = &hisi_hba->pdev->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;

	if (!device->port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %llu not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	port = device->port->lldd_port;
	if (port && !port->port_attached) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;

			dev_info(dev,
				 "task prep: SATA/STP port%d not attach device\n",
				 device->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		} else {
			struct task_status_struct *ts = &task->task_status;

			dev_info(dev,
				 "task prep: SAS port%d does not attach device\n",
				 device->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return 0;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc)
		goto err_out;
	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
					 &dlvry_queue_slot);
	if (rc)
		goto err_out_tag;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
					     GFP_ATOMIC,
					     &slot->status_buffer_dma);
	if (!slot->status_buffer) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);

	slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
					     GFP_ATOMIC,
					     &slot->command_table_dma);
	if (!slot->command_table) {
		rc = -ENOMEM;
		goto err_out_status_buf;
	}
	memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		if (slot->sge_page)
			goto err_out_sge;
		goto err_out_command_table;
	}

	list_add_tail(&slot->entry, &port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	hisi_hba->slot_prep = slot;

	sas_dev->running_req++;
	++(*pass);

	return 0;

err_out_sge:
	dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
		      slot->sge_page_dma);
err_out_command_table:
	dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
		      slot->command_table_dma);
err_out_status_buf:
	dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
		      slot->status_buffer_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
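
/*
 * Queue a libsas task: prepare the command under hisi_hba->lock and, if
 * anything was queued, ring the delivery queue doorbell before dropping
 * the lock.
 */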
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = &hisi_hba->pdev->dev;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return rc;
}
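
/*
 * Report an attached phy to libsas: update the sas_phy link rates, fill
 * in the received identify frame for SAS phys, then raise PHYE_OOB_DONE
 * and PORTE_BYTES_DMAED events.
 */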
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			break;
		}
	}
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = &hisi_hba->pdev->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}

static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	int i;

	for (i = 0; i < hisi_hba->n_phy; ++i)
		hisi_sas_bytes_dmaed(hisi_hba, i);

	hisi_hba->scan_finished = 1;
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	if (hisi_hba->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no,
				     struct domain_device *device)
{
	struct hisi_sas_phy *phy;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot, *slot2;
	struct device *dev = &hisi_hba->pdev->dev;

	phy = &hisi_hba->phy[phy_no];
	port = phy->port;
	if (!port)
		return;

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;

		task = slot->task;
		if (device && task->dev != device)
			continue;

		dev_info(dev, "Release slot [%d:%d], task [%p]:\n",
			 slot->dlvry_queue, slot->dlvry_queue_slot, task);
		hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
	}
}

static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy)
{
	struct domain_device *device;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;

	list_for_each_entry(device, &sas_port->dev_list, dev_list_node)
		hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct asd_sas_port *port = device->port;
	struct asd_sas_phy *sas_phy;

	list_for_each_entry(sas_phy, &port->phy_list, port_phy_el)
		hisi_sas_do_release_task(hisi_hba, sas_phy->id, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	u64 dev_id = sas_dev->device_id;

	dev_info(dev, "found dev[%lld:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->device_id = dev_id;
	sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
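/*
 * Issue a TMF as an internal slow task and wait for its completion,
 * retrying up to TASK_RETRY times. A timeout frees the slot and bails
 * out; underrun/overrun and other responses are reported back to the
 * caller.
 */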
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = &hisi_hba->pdev->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.data = (unsigned long) task;
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
					tmf->tag_of_task_to_be_managed);
				if (task->lldd_task) {
					struct hisi_sas_slot *slot =
						task->lldd_task;

					hisi_sas_slot_task_free(hisi_hba,
								task, slot);
				}

				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	WARN_ON(retry == TASK_RETRY);
	sas_free_task(task);
	return res;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = &hisi_hba->pdev->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	spin_unlock_irqrestore(&task->task_state_lock, flags);
	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		/* if successful, clear the task and callback forwards.*/
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			if (task->lldd_task) {
				struct hisi_sas_slot *slot;

				slot = &hisi_hba->slot_info
					[tmf_task.tag_of_task_to_be_managed];
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}

		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_CMD, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			hisi_sas_internal_task_abort(hisi_hba, device,
						     HISI_SAS_INT_ABT_DEV, 0);
			rc = TMF_RESP_FUNC_COMPLETE;
		}
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_CMD, tag);
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_task(hisi_hba, device);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return 0;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_LU_RESET;
	sas_dev->dev_status = HISI_SAS_DEV_EH;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	/* If failed, fall-through I_T_Nexus reset */
	dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
		sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	return rc;
}
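
/*
 * Deliver an internal abort command to the hardware for the given device
 * (and, for single-IO aborts, the given tag). Uses the same slot and
 * delivery-queue machinery as normal commands but skips DMA mapping.
 * Called under hisi_hba->lock.
 */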
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;

	if (!device->port)
		return -1;

	port = device->port->lldd_port;

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc)
		goto err_out;
	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
					 &dlvry_queue_slot);
	if (rc)
		goto err_out_tag;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_tag;

	/* Port structure is static for the HBA, so
	 * even if the port is deformed it is ok
	 * to reference.
	 */
	list_add_tail(&slot->entry, &port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	hisi_hba->slot_prep = slot;

	sas_dev->running_req++;
	/* send abort command to our chip */
	hisi_hba->hw->start_delivery(hisi_hba);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = &hisi_hba->pdev->dev;
	int res;
	unsigned long flags;

	if (!hisi_hba->hw->prep_abort)
		return -EOPNOTSUPP;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + 20*HZ;
	add_timer(&task->slow_task->timer);

	/* Lock as we are alloc'ing a slot, which cannot be interrupted */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	/* TMF timed out, return direct. */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			dev_err(dev, "internal task abort: timeout.\n");
			if (task->lldd_task) {
				struct hisi_sas_slot *slot = task->lldd_task;

				hisi_sas_slot_task_free(hisi_hba, task, slot);
			}
		}
	}

exit:
	dev_info(dev, "internal task abort: task to dev %016llx task=%p "
		 "resp: 0x%x sts 0x%x\n",
		 SAS_ADDR(device->sas_addr),
		 task,
		 task->task_status.resp, /* 0 is complete, -1 is undelivered */
		 task->task_status.stat);
	sas_free_task(task);

	return res;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_deformed(sas_phy);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}
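
/*
 * Handle a phy-down event from the hardware layer. If the phy is still
 * "ready" (rdy) the link is re-reported to libsas; otherwise libsas is
 * told the signal was lost and the port/phy bookkeeping is cleared.
 */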
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

static struct scsi_transport_template *hisi_sas_stt;

static struct scsi_host_template hisi_sas_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = sas_queuecommand,
	.target_alloc = sas_target_alloc,
	.slave_configure = hisi_sas_slave_configure,
	.scan_finished = hisi_sas_scan_finished,
	.scan_start = hisi_sas_scan_start,
	.change_queue_depth = sas_change_queue_depth,
	.bios_param = sas_bios_param,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler = sas_eh_bus_reset_handler,
	.target_destroy = sas_target_destroy,
	.ioctl = sas_ioctl,
};

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found = hisi_sas_dev_found,
	.lldd_dev_gone = hisi_sas_dev_gone,
	.lldd_execute_task = hisi_sas_queue_command,
	.lldd_control_phy = hisi_sas_control_phy,
	.lldd_abort_task = hisi_sas_abort_task,
	.lldd_abort_task_set = hisi_sas_abort_task_set,
	.lldd_clear_aca = hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset = hisi_sas_lu_reset,
	.lldd_query_task = hisi_sas_query_task,
	.lldd_port_formed = hisi_sas_port_formed,
	.lldd_port_deformed = hisi_sas_port_deformed,
};
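
/*
 * Allocate the DMA-coherent queues, pools, and per-slot bookkeeping used
 * by the HBA: delivery/completion queues, ITCT, IOST, breakpoint tables,
 * the tag bitmap, and the driver workqueue.
 */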
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct platform_device *pdev = hisi_hba->pdev;
	struct device *dev = &pdev->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
		INIT_LIST_HEAD(&hisi_hba->port[i].list);
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;
		memset(hisi_hba->cmd_hdr[i], 0, s);

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
		memset(hisi_hba->complete_hdr[i], 0, s);
	}

	s = HISI_SAS_STATUS_BUF_SZ;
	hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
						       dev, s, 16, 0);
	if (!hisi_hba->status_buffer_pool)
		goto err_out;

	s = HISI_SAS_COMMAND_TABLE_SZ;
	hisi_hba->command_table_pool = dma_pool_create("command_table",
						       dev, s, 16, 0);
	if (!hisi_hba->command_table_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	memset(hisi_hba->breakpoint, 0, s);

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / sizeof(unsigned long);
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
				sizeof(struct hisi_sas_sge_page), 16, 0);
	if (!hisi_hba->sge_page_pool)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	memset(hisi_hba->sata_breakpoint, 0, s);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}

static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->status_buffer_pool);
	dma_pool_destroy(hisi_hba->command_table_pool);
	dma_pool_destroy(hisi_hba->sge_page_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);


	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;

	shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost)
		goto err_out;
	hisi_hba = shost_priv(shost);

	hisi_hba->hw = hw;
	hisi_hba->pdev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE))
		goto err_out;

	if (np) {
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl))
			goto err_out;

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg))
			goto err_out;

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg))
			goto err_out;

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg))
			goto err_out;
	}

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
		goto err_out;

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		return -ENOMEM;

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = &hisi_hba->pdev->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	kfree(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	scsi_remove_host(sha->core.shost);
	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
	pr_info("hisi_sas: driver version %s\n", DRV_VERSION);

	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);