]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
scsi: hisi_sas: workaround a SoC SATA IO processing bug
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / hisi_sas / hisi_sas_v2_hw.c
1 /*
2 * Copyright (c) 2016 Linaro Ltd.
3 * Copyright (c) 2016 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas_v2_hw"
14
15 /* global registers need init*/
16 #define DLVRY_QUEUE_ENABLE 0x0
17 #define IOST_BASE_ADDR_LO 0x8
18 #define IOST_BASE_ADDR_HI 0xc
19 #define ITCT_BASE_ADDR_LO 0x10
20 #define ITCT_BASE_ADDR_HI 0x14
21 #define IO_BROKEN_MSG_ADDR_LO 0x18
22 #define IO_BROKEN_MSG_ADDR_HI 0x1c
23 #define PHY_CONTEXT 0x20
24 #define PHY_STATE 0x24
25 #define PHY_PORT_NUM_MA 0x28
26 #define PORT_STATE 0x2c
27 #define PORT_STATE_PHY8_PORT_NUM_OFF 16
28 #define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
29 #define PORT_STATE_PHY8_CONN_RATE_OFF 20
30 #define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
31 #define PHY_CONN_RATE 0x30
32 #define HGC_TRANS_TASK_CNT_LIMIT 0x38
33 #define AXI_AHB_CLK_CFG 0x3c
34 #define ITCT_CLR 0x44
35 #define ITCT_CLR_EN_OFF 16
36 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
37 #define ITCT_DEV_OFF 0
38 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
39 #define AXI_USER1 0x48
40 #define AXI_USER2 0x4c
41 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
42 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
43 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60
44 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64
45 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
46 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
47 #define HGC_GET_ITV_TIME 0x90
48 #define DEVICE_MSG_WORK_MODE 0x94
49 #define OPENA_WT_CONTI_TIME 0x9c
50 #define I_T_NEXUS_LOSS_TIME 0xa0
51 #define MAX_CON_TIME_LIMIT_TIME 0xa4
52 #define BUS_INACTIVE_LIMIT_TIME 0xa8
53 #define REJECT_TO_OPEN_LIMIT_TIME 0xac
54 #define CFG_AGING_TIME 0xbc
55 #define HGC_DFX_CFG2 0xc0
56 #define HGC_IOMB_PROC1_STATUS 0x104
57 #define CFG_1US_TIMER_TRSH 0xcc
58 #define HGC_LM_DFX_STATUS2 0x128
59 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
60 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
61 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
62 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
63 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
64 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
65 #define HGC_CQE_ECC_ADDR 0x13c
66 #define HGC_CQE_ECC_1B_ADDR_OFF 0
67 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
68 #define HGC_CQE_ECC_MB_ADDR_OFF 8
69 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
70 #define HGC_IOST_ECC_ADDR 0x140
71 #define HGC_IOST_ECC_1B_ADDR_OFF 0
72 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
73 #define HGC_IOST_ECC_MB_ADDR_OFF 16
74 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
75 #define HGC_DQE_ECC_ADDR 0x144
76 #define HGC_DQE_ECC_1B_ADDR_OFF 0
77 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
78 #define HGC_DQE_ECC_MB_ADDR_OFF 16
79 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
80 #define HGC_INVLD_DQE_INFO 0x148
81 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
82 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
83 #define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
84 #define HGC_ITCT_ECC_ADDR 0x150
85 #define HGC_ITCT_ECC_1B_ADDR_OFF 0
86 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
87 HGC_ITCT_ECC_1B_ADDR_OFF)
88 #define HGC_ITCT_ECC_MB_ADDR_OFF 16
89 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
90 HGC_ITCT_ECC_MB_ADDR_OFF)
91 #define HGC_AXI_FIFO_ERR_INFO 0x154
92 #define AXI_ERR_INFO_OFF 0
93 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
94 #define FIFO_ERR_INFO_OFF 8
95 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
96 #define INT_COAL_EN 0x19c
97 #define OQ_INT_COAL_TIME 0x1a0
98 #define OQ_INT_COAL_CNT 0x1a4
99 #define ENT_INT_COAL_TIME 0x1a8
100 #define ENT_INT_COAL_CNT 0x1ac
101 #define OQ_INT_SRC 0x1b0
102 #define OQ_INT_SRC_MSK 0x1b4
103 #define ENT_INT_SRC1 0x1b8
104 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
105 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
106 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
107 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
108 #define ENT_INT_SRC2 0x1bc
109 #define ENT_INT_SRC3 0x1c0
110 #define ENT_INT_SRC3_WP_DEPTH_OFF 8
111 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
112 #define ENT_INT_SRC3_RP_DEPTH_OFF 10
113 #define ENT_INT_SRC3_AXI_OFF 11
114 #define ENT_INT_SRC3_FIFO_OFF 12
115 #define ENT_INT_SRC3_LM_OFF 14
116 #define ENT_INT_SRC3_ITC_INT_OFF 15
117 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
118 #define ENT_INT_SRC3_ABT_OFF 16
119 #define ENT_INT_SRC_MSK1 0x1c4
120 #define ENT_INT_SRC_MSK2 0x1c8
121 #define ENT_INT_SRC_MSK3 0x1cc
122 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
123 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
124 #define SAS_ECC_INTR 0x1e8
125 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
126 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
127 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
128 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
129 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4
130 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5
131 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6
132 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7
133 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8
134 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9
135 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
136 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
137 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12
138 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13
139 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14
140 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15
141 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16
142 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17
143 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18
144 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
145 #define SAS_ECC_INTR_MSK 0x1ec
146 #define HGC_ERR_STAT_EN 0x238
147 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
148 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
149 #define DLVRY_Q_0_DEPTH 0x268
150 #define DLVRY_Q_0_WR_PTR 0x26c
151 #define DLVRY_Q_0_RD_PTR 0x270
152 #define HYPER_STREAM_ID_EN_CFG 0xc80
153 #define OQ0_INT_SRC_MSK 0xc90
154 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0
155 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4
156 #define COMPL_Q_0_DEPTH 0x4e8
157 #define COMPL_Q_0_WR_PTR 0x4ec
158 #define COMPL_Q_0_RD_PTR 0x4f0
159 #define HGC_RXM_DFX_STATUS14 0xae8
160 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
161 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
162 HGC_RXM_DFX_STATUS14_MEM0_OFF)
163 #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
164 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
165 HGC_RXM_DFX_STATUS14_MEM1_OFF)
166 #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
167 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
168 HGC_RXM_DFX_STATUS14_MEM2_OFF)
169 #define HGC_RXM_DFX_STATUS15 0xaec
170 #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
171 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
172 HGC_RXM_DFX_STATUS15_MEM3_OFF)
173 /* phy registers need init */
174 #define PORT_BASE (0x2000)
175
176 #define PHY_CFG (PORT_BASE + 0x0)
177 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
178 #define PHY_CFG_ENA_OFF 0
179 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
180 #define PHY_CFG_DC_OPT_OFF 2
181 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
182 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
183 #define PROG_PHY_LINK_RATE_MAX_OFF 0
184 #define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF)
185 #define PHY_CTRL (PORT_BASE + 0x14)
186 #define PHY_CTRL_RESET_OFF 0
187 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
188 #define SAS_PHY_CTRL (PORT_BASE + 0x20)
189 #define SL_CFG (PORT_BASE + 0x84)
190 #define PHY_PCN (PORT_BASE + 0x44)
191 #define SL_TOUT_CFG (PORT_BASE + 0x8c)
192 #define SL_CONTROL (PORT_BASE + 0x94)
193 #define SL_CONTROL_NOTIFY_EN_OFF 0
194 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
195 #define SL_CONTROL_CTA_OFF 17
196 #define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
197 #define RX_PRIMS_STATUS (PORT_BASE + 0x98)
198 #define RX_BCAST_CHG_OFF 1
199 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
200 #define TX_ID_DWORD0 (PORT_BASE + 0x9c)
201 #define TX_ID_DWORD1 (PORT_BASE + 0xa0)
202 #define TX_ID_DWORD2 (PORT_BASE + 0xa4)
203 #define TX_ID_DWORD3 (PORT_BASE + 0xa8)
204 #define TX_ID_DWORD4 (PORT_BASE + 0xaC)
205 #define TX_ID_DWORD5 (PORT_BASE + 0xb0)
206 #define TX_ID_DWORD6 (PORT_BASE + 0xb4)
207 #define TXID_AUTO (PORT_BASE + 0xb8)
208 #define TXID_AUTO_CT3_OFF 1
209 #define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF)
210 #define TX_HARDRST_OFF 2
211 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
212 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
213 #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
214 #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
215 #define RX_IDAF_DWORD3 (PORT_BASE + 0xd0)
216 #define RX_IDAF_DWORD4 (PORT_BASE + 0xd4)
217 #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
218 #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
219 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
220 #define CON_CONTROL (PORT_BASE + 0x118)
221 #define CON_CONTROL_CFG_OPEN_ACC_STP_OFF 0
222 #define CON_CONTROL_CFG_OPEN_ACC_STP_MSK \
223 (0x01 << CON_CONTROL_CFG_OPEN_ACC_STP_OFF)
224 #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
225 #define CHL_INT0 (PORT_BASE + 0x1b4)
226 #define CHL_INT0_HOTPLUG_TOUT_OFF 0
227 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
228 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1
229 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
230 #define CHL_INT0_SL_PHY_ENABLE_OFF 2
231 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
232 #define CHL_INT0_NOT_RDY_OFF 4
233 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
234 #define CHL_INT0_PHY_RDY_OFF 5
235 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
236 #define CHL_INT1 (PORT_BASE + 0x1b8)
237 #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
238 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
239 #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
240 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
241 #define CHL_INT2 (PORT_BASE + 0x1bc)
242 #define CHL_INT0_MSK (PORT_BASE + 0x1c0)
243 #define CHL_INT1_MSK (PORT_BASE + 0x1c4)
244 #define CHL_INT2_MSK (PORT_BASE + 0x1c8)
245 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
246 #define DMA_TX_DFX1 (PORT_BASE + 0x204)
247 #define DMA_TX_DFX1_IPTT_OFF 0
248 #define DMA_TX_DFX1_IPTT_MSK (0xffff << DMA_TX_DFX1_IPTT_OFF)
249 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
250 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
251 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
252 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
253 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
254 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
255 #define DMA_TX_STATUS (PORT_BASE + 0x2d0)
256 #define DMA_TX_STATUS_BUSY_OFF 0
257 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
258 #define DMA_RX_STATUS (PORT_BASE + 0x2e8)
259 #define DMA_RX_STATUS_BUSY_OFF 0
260 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
261
262 #define AXI_CFG (0x5100)
263 #define AM_CFG_MAX_TRANS (0x5010)
264 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
265
266 #define AXI_MASTER_CFG_BASE (0x5000)
267 #define AM_CTRL_GLOBAL (0x0)
268 #define AM_CURR_TRANS_RETURN (0x150)
269
270 /* HW dma structures */
271 /* Delivery queue header */
272 /* dw0 */
273 #define CMD_HDR_ABORT_FLAG_OFF 0
274 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
275 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
276 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
277 #define CMD_HDR_RESP_REPORT_OFF 5
278 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
279 #define CMD_HDR_TLR_CTRL_OFF 6
280 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
281 #define CMD_HDR_PORT_OFF 18
282 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
283 #define CMD_HDR_PRIORITY_OFF 27
284 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
285 #define CMD_HDR_CMD_OFF 29
286 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
287 /* dw1 */
288 #define CMD_HDR_DIR_OFF 5
289 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
290 #define CMD_HDR_RESET_OFF 7
291 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
292 #define CMD_HDR_VDTL_OFF 10
293 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
294 #define CMD_HDR_FRAME_TYPE_OFF 11
295 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
296 #define CMD_HDR_DEV_ID_OFF 16
297 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
298 /* dw2 */
299 #define CMD_HDR_CFL_OFF 0
300 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
301 #define CMD_HDR_NCQ_TAG_OFF 10
302 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
303 #define CMD_HDR_MRFL_OFF 15
304 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
305 #define CMD_HDR_SG_MOD_OFF 24
306 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
#define CMD_HDR_FIRST_BURST_OFF 26
/*
 * Fix: the mask was built from CMD_HDR_SG_MOD_OFF (bit 24) instead of
 * CMD_HDR_FIRST_BURST_OFF (bit 26) -- an apparent copy/paste slip from
 * the SG_MOD pair above.
 */
#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_FIRST_BURST_OFF)
309 /* dw3 */
310 #define CMD_HDR_IPTT_OFF 0
311 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
312 /* dw6 */
313 #define CMD_HDR_DIF_SGL_LEN_OFF 0
314 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
315 #define CMD_HDR_DATA_SGL_LEN_OFF 16
316 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
317 #define CMD_HDR_ABORT_IPTT_OFF 16
318 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
319
320 /* Completion header */
321 /* dw0 */
322 #define CMPLT_HDR_ERR_PHASE_OFF 2
323 #define CMPLT_HDR_ERR_PHASE_MSK (0xff << CMPLT_HDR_ERR_PHASE_OFF)
324 #define CMPLT_HDR_RSPNS_XFRD_OFF 10
325 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
326 #define CMPLT_HDR_ERX_OFF 12
327 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
328 #define CMPLT_HDR_ABORT_STAT_OFF 13
329 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
330 /* abort_stat */
331 #define STAT_IO_NOT_VALID 0x1
332 #define STAT_IO_NO_DEVICE 0x2
333 #define STAT_IO_COMPLETE 0x3
334 #define STAT_IO_ABORTED 0x4
335 /* dw1 */
336 #define CMPLT_HDR_IPTT_OFF 0
337 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
338 #define CMPLT_HDR_DEV_ID_OFF 16
339 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
340
341 /* ITCT header */
342 /* qw0 */
343 #define ITCT_HDR_DEV_TYPE_OFF 0
344 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
345 #define ITCT_HDR_VALID_OFF 2
346 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
347 #define ITCT_HDR_MCR_OFF 5
348 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
349 #define ITCT_HDR_VLN_OFF 9
350 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
351 #define ITCT_HDR_SMP_TIMEOUT_OFF 16
352 #define ITCT_HDR_SMP_TIMEOUT_8US 1
353 #define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \
354 250) /* 2ms */
355 #define ITCT_HDR_AWT_CONTINUE_OFF 25
356 #define ITCT_HDR_PORT_ID_OFF 28
357 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
358 /* qw2 */
359 #define ITCT_HDR_INLT_OFF 0
360 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
361 #define ITCT_HDR_BITLT_OFF 16
362 #define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF)
363 #define ITCT_HDR_MCTLT_OFF 32
364 #define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF)
365 #define ITCT_HDR_RTOLT_OFF 48
366 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
367
368 #define HISI_SAS_FATAL_INT_NR 2
369
/*
 * Completion queue entry (four dwords), written back by the HW.
 * dw0/dw1 are decoded with the CMPLT_HDR_* offsets/masks above:
 * dw0 carries error phase, response-transferred, ERX and abort status;
 * dw1 carries the IPTT (15:0) and device id (31:16).
 */
struct hisi_sas_complete_v2_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;	/* NOTE(review): meaning not shown in this file -- confirm */
	__le32 dw3;
};
376
/*
 * Per-IO error record written back by the HW.  Each field is decoded
 * against the corresponding error-code group defined in the big enum
 * below (TRANS_TX_FAIL_BASE / TRANS_RX_FAIL_BASE / DMA_TX_ERR_BASE /
 * SIPC_RX_ERR_BASE / DMA_RX_ERR_BASE).
 */
struct hisi_sas_err_record_v2 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};
391
/* Indices for the per-phy interrupt sources (count given by _INT_NR). */
enum {
	HISI_SAS_PHY_PHY_UPDOWN,
	HISI_SAS_PHY_CHNL_INT,
	HISI_SAS_PHY_INT_NR	/* number of per-phy interrupt types */
};
397
/*
 * Error codes reported via struct hisi_sas_err_record_v2.  Each group
 * starts at a fixed base (0x0/0x20/0x40/0x50/0x60) matching the record
 * field it is decoded from.  Several numeric values are shared between
 * SSP/SMP and SATA/STP; the commented-out names record the SATA/STP
 * meaning of the same value.
 */
enum {
	TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
	TRANS_RX_FAIL_BASE = 0x20, /* dw1 */
	DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */
	SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/
	DMA_RX_ERR_BASE = 0x60, /* dw3 */

	/* trans tx*/
	TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
	TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
	TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
	TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
	TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
	RESERVED0, /* 0x5 */
	TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
	TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
	TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
	TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
	TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
	TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
	TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
	TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
	TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
	TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
	TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
	TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
	TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
	TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
	TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
	TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
	TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/
	TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
	TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
	TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
	TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/
	TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/
	/*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */
	TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
	/*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */
	TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
	TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
	/*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */
	TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */

	/* trans rx */
	TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */
	TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */
	TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */
	/*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */
	TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */
	/*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/
	TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */
	TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */
	TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */
	TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */
	RESERVED1, /* 0x2b */
	TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */
	TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */
	TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */
	TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */
	TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */
	TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */
	/*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */
	TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/
	/*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for sata/stp */
	TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */
	/*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */
	RESERVED2, /* 0x34 */
	RESERVED3, /* 0x35 */
	RESERVED4, /* 0x36 */
	RESERVED5, /* 0x37 */
	TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */
	TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */
	TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */
	RESERVED6, /* 0x3b */
	RESERVED7, /* 0x3c */
	RESERVED8, /* 0x3d */
	RESERVED9, /* 0x3e */
	TRANS_RX_R_ERR, /* 0x3f */

	/* dma tx */
	DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */
	DMA_TX_DIF_APP_ERR, /* 0x41 */
	DMA_TX_DIF_RPP_ERR, /* 0x42 */
	DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */
	DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */
	DMA_TX_UNEXP_XFER_ERR, /* 0x45 */
	DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */
	DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */
	DMA_TX_XFER_OFFSET_ERR, /* 0x48 */
	DMA_TX_RAM_ECC_ERR, /* 0x49 */
	DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */
	DMA_TX_MAX_ERR_CODE,

	/* sipc rx */
	SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */
	SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */
	SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */
	SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */
	SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */
	SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */
	SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */
	SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */
	SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */
	SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */
	SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */
	SIPC_RX_MAX_ERR_CODE,

	/* dma rx */
	DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */
	DMA_RX_DIF_APP_ERR, /* 0x61 */
	DMA_RX_DIF_RPP_ERR, /* 0x62 */
	DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */
	DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */
	DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */
	DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */
	DMA_RX_DATA_OFFSET_ERR, /* 0x67 */
	RESERVED10, /* 0x68 */
	DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */
	DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */
	DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */
	DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */
	DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */
	DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */
	DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */
	DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */
	DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */
	DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */
	DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */
	DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */
	DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */
	DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */
	DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */
	DMA_RX_RAM_ECC_ERR, /* 0x78 */
	DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */
	DMA_RX_MAX_ERR_CODE,
};
538
539 #define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
540 #define HISI_MAX_SATA_SUPPORT_V2_HW (HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1)
541
542 #define DIR_NO_DATA 0
543 #define DIR_TO_INI 1
544 #define DIR_TO_DEVICE 2
545 #define DIR_RESERVED 3
546
547 #define SATA_PROTOCOL_NONDATA 0x1
548 #define SATA_PROTOCOL_PIO 0x2
549 #define SATA_PROTOCOL_DMA 0x4
550 #define SATA_PROTOCOL_FPDMA 0x8
551 #define SATA_PROTOCOL_ATAPI 0x10
552
553 #define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \
554 err_phase == 0x4 || err_phase == 0x8 ||\
555 err_phase == 0x6 || err_phase == 0xa)
556 #define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \
557 err_phase == 0x20 || err_phase == 0x40)
558
559 static void link_timeout_disable_link(unsigned long data);
560
561 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
562 {
563 void __iomem *regs = hisi_hba->regs + off;
564
565 return readl(regs);
566 }
567
568 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
569 {
570 void __iomem *regs = hisi_hba->regs + off;
571
572 return readl_relaxed(regs);
573 }
574
575 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
576 {
577 void __iomem *regs = hisi_hba->regs + off;
578
579 writel(val, regs);
580 }
581
582 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
583 u32 off, u32 val)
584 {
585 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
586
587 writel(val, regs);
588 }
589
590 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
591 int phy_no, u32 off)
592 {
593 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
594
595 return readl(regs);
596 }
597
/*
 * Allocate an IPTT (slot index) honouring the v2 SoC quirks: SAS/SMP
 * devices may only use odd IPTTs starting from 1, while each SATA
 * device allocates even IPTTs from its private 64-slot window.
 * Returns 0 and stores the index in *slot_idx on success, or
 * -EINVAL / -SAS_QUEUE_FULL on failure.
 * This function needs to be protected from pre-emption.
 */
static int
slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
			struct domain_device *device)
{
	int sata_dev = dev_is_sata(device);
	void *bitmap = hisi_hba->slot_index_tags;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	int sata_idx = sas_dev->sata_idx;
	int start, end;

	if (!sata_dev) {
		/*
		 * STP link SoC bug workaround: index starts from 1.
		 * additionally, we can only allocate odd IPTT(1~4095)
		 * for SAS/SMP device.
		 */
		start = 1;
		end = hisi_hba->slot_index_count;
	} else {
		if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW)
			return -EINVAL;

		/*
		 * For SATA device: allocate even IPTT in this interval
		 * [64*(sata_idx+1), 64*(sata_idx+2)], then each SATA device
		 * own 32 IPTTs. IPTT 0 shall not be used due to STP link
		 * SoC bug workaround. So we ignore the first 32 even IPTTs.
		 */
		start = 64 * (sata_idx + 1);
		end = 64 * (sata_idx + 2);
	}

	while (1) {
		start = find_next_zero_bit(bitmap,
					hisi_hba->slot_index_count, start);
		if (start >= end)
			return -SAS_QUEUE_FULL;
		/*
		 * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
		 */
		if (sata_dev ^ (start & 1))
			break;
		start++;
	}

	set_bit(start, bitmap);
	*slot_idx = start;
	return 0;
}
648
649 static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
650 {
651 unsigned int index;
652 struct device *dev = &hisi_hba->pdev->dev;
653 void *bitmap = hisi_hba->sata_dev_bitmap;
654
655 index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW);
656 if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) {
657 dev_warn(dev, "alloc sata index failed, index=%d\n", index);
658 return false;
659 }
660
661 set_bit(index, bitmap);
662 *idx = index;
663 return true;
664 }
665
666
667 static struct
668 hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
669 {
670 struct hisi_hba *hisi_hba = device->port->ha->lldd_ha;
671 struct hisi_sas_device *sas_dev = NULL;
672 int i, sata_dev = dev_is_sata(device);
673 int sata_idx = -1;
674
675 spin_lock(&hisi_hba->lock);
676
677 if (sata_dev)
678 if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
679 goto out;
680
681 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
682 /*
683 * SATA device id bit0 should be 0
684 */
685 if (sata_dev && (i & 1))
686 continue;
687 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
688 hisi_hba->devices[i].device_id = i;
689 sas_dev = &hisi_hba->devices[i];
690 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
691 sas_dev->dev_type = device->dev_type;
692 sas_dev->hisi_hba = hisi_hba;
693 sas_dev->sas_device = device;
694 sas_dev->sata_idx = sata_idx;
695 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
696 break;
697 }
698 }
699
700 out:
701 spin_unlock(&hisi_hba->lock);
702
703 return sas_dev;
704 }
705
706 static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
707 {
708 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
709
710 cfg &= ~PHY_CFG_DC_OPT_MSK;
711 cfg |= 1 << PHY_CFG_DC_OPT_OFF;
712 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
713 }
714
715 static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
716 {
717 struct sas_identify_frame identify_frame;
718 u32 *identify_buffer;
719
720 memset(&identify_frame, 0, sizeof(identify_frame));
721 identify_frame.dev_type = SAS_END_DEVICE;
722 identify_frame.frame_type = 0;
723 identify_frame._un1 = 1;
724 identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
725 identify_frame.target_bits = SAS_PROTOCOL_NONE;
726 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
727 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
728 identify_frame.phy_id = phy_no;
729 identify_buffer = (u32 *)(&identify_frame);
730
731 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
732 __swab32(identify_buffer[0]));
733 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
734 __swab32(identify_buffer[1]));
735 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
736 __swab32(identify_buffer[2]));
737 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
738 __swab32(identify_buffer[3]));
739 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
740 __swab32(identify_buffer[4]));
741 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
742 __swab32(identify_buffer[5]));
743 }
744
/*
 * Program the ITCT (device context table) entry for @sas_dev: device
 * type, link rate, owning port, SAS address and -- for non-SATA
 * devices -- connection timeout limits.
 */
static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = &hisi_hba->pdev->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);

	memset(itct, 0, sizeof(*itct));

	/* qw0: device type field, then common control/valid bits below */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		/* SATA behind an expander is reached via STP */
		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		/* entry is still marked valid below -- presumably intentional */
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1: SAS address, byte-swapped to the layout the HW expects */
	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = __swab64(itct->sas_addr);

	/* qw2: INLT/BITLT/MCTLT/RTOLT timeout limits, non-SATA only
	 * (units per HW spec -- TODO confirm against the datasheet) */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_BITLT_OFF) |
					(0x32ULL << ITCT_HDR_MCTLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
797
/*
 * Tear down the ITCT entry for @sas_dev and release its SATA index.
 *
 * The HW clears the entry when ITCT_CLR is written with the enable bit
 * and the device id; completion is signalled via the ITC bit in
 * ENT_INT_SRC3, which is polled here (up to two attempts, 10us apart)
 * rather than handled in an interrupt handler.
 */
static void free_device_v2_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_device *sas_dev)
{
	u64 dev_id = sas_dev->device_id;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	int i;

	/* SoC bug workaround: give back the per-device SATA index */
	if (dev_is_sata(sas_dev->sas_device))
		clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);

	/* clear any stale ITC interrupt before starting the sequence */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct int*/
	for (i = 0; i < 2; i++) {
		/* ask HW to clear the ITCT entry for dev_id */
		reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
		hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

		udelay(10);
		reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
		if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
			dev_dbg(dev, "got clear ITCT done interrupt\n");

			/* invalid the itct state*/
			memset(itct, 0, sizeof(struct hisi_sas_itct));
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					 ENT_INT_SRC3_ITC_INT_MSK);

			/* clear the itct */
			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
			dev_dbg(dev, "clear ITCT ok\n");
			break;
		}
	}
}
840
/*
 * reset_hw_v2_hw - quiesce and hard-reset the SAS controller.
 * @hisi_hba: host controller instance
 *
 * Disables all phys, waits (up to 1s per stage) for per-phy DMA engines
 * and the AXI bus to drain, then resets the controller either via the
 * ACPI _RST method or via the syscon reset/clock registers.
 *
 * Returns 0 on success, -EIO if a wait times out or the (de-)reset does
 * not take effect.
 */
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
	int i, reset_val;
	u32 val;
	unsigned long end_time;
	struct device *dev = &hisi_hba->pdev->dev;

	/* The mask needs to be set depending on the number of phys */
	if (hisi_hba->n_phy == 9)
		reset_val = 0x1fffff;
	else
		reset_val = 0x7ffff;

	/* stop fetching new commands */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);

		phy_cfg &= ~PHY_CTRL_RESET_MSK;
		hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
	}
	udelay(50);

	/* Ensure DMA tx & rx idle */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		u32 dma_tx_status, dma_rx_status;

		end_time = jiffies + msecs_to_jiffies(1000);

		while (1) {
			dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
							    DMA_TX_STATUS);
			dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
							    DMA_RX_STATUS);

			if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
				!(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
				break;

			msleep(20);
			if (time_after(jiffies, end_time))
				return -EIO;
		}
	}

	/* Ensure axi bus idle */
	end_time = jiffies + msecs_to_jiffies(1000);
	while (1) {
		u32 axi_status =
			hisi_sas_read32(hisi_hba, AXI_CFG);

		if (axi_status == 0)
			break;

		msleep(20);
		if (time_after(jiffies, end_time))
			return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		/* firmware owns the reset sequence on ACPI systems */
		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else if (hisi_hba->ctrl) {
		/* reset and disable clock*/
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
			     reset_val);
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
			     reset_val);
		msleep(1);
		regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
		if (reset_val != (val & reset_val)) {
			dev_err(dev, "SAS reset fail.\n");
			return -EIO;
		}

		/* De-reset and enable clock*/
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
			     reset_val);
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
			     reset_val);
		msleep(1);
		regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
			    &val);
		if (val & reset_val) {
			dev_err(dev, "SAS de-reset fail.\n");
			return -EIO;
		}
	} else
		dev_warn(dev, "no reset method\n");

	return 0;
}
939
940 /* This function needs to be called after resetting SAS controller. */
941 static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba)
942 {
943 u32 cfg;
944 int phy_no;
945
946 hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1;
947 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
948 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL);
949 if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK))
950 continue;
951
952 cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK;
953 hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg);
954 }
955 }
956
957 static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba)
958 {
959 int phy_no;
960 u32 dma_tx_dfx1;
961
962 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
963 if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no)))
964 continue;
965
966 dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no,
967 DMA_TX_DFX1);
968 if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) {
969 u32 cfg = hisi_sas_phy_read32(hisi_hba,
970 phy_no, CON_CONTROL);
971
972 cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK;
973 hisi_sas_phy_write32(hisi_hba, phy_no,
974 CON_CONTROL, cfg);
975 clear_bit(phy_no, &hisi_hba->reject_stp_links_msk);
976 }
977 }
978 }
979
/*
 * init_reg_v2_hw - program the controller's registers after reset.
 * @hisi_hba: host controller instance
 *
 * Writes the global configuration (queue enables, interrupt sources and
 * masks, coalescing, timeouts), the per-phy configuration, the per-queue
 * delivery/completion queue base addresses and depths, and the DMA
 * addresses of the ITCT, IOST, breakpoint and initial-FIS buffers.
 */
static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	int i;

	/* Global registers init */

	/* Deal with am-max-transmissions quirk */
	if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) {
		hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
		hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
				 0x2020);
	} /* Else, use defaults -> do nothing */

	/* enable one delivery queue per configured queue */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
	hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0);
	hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
	hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
	hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
	hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32);
	hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
	hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3);
	hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
	/* acknowledge any stale interrupt sources, then set masks */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
	/* unmask all completion-queue interrupts */
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	/* Per-phy registers init */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
		hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
		hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
		hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2);
		hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		if (hisi_hba->refclk_frequency_mhz == 66)
			hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
		/* else, do nothing -> leave it how you found it */
	}

	/* Per-queue registers: 0x14 bytes of register space per queue */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}
1110
/*
 * Timer callback (link timeout quirk): re-enable the first phy found
 * with its CON_CONTROL enable bit clear, then re-arm the timer to run
 * the disable handler 900ms later. Phys currently rejecting STP
 * connections are skipped.
 */
static void link_timeout_enable_link(unsigned long data)
{
	struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
	int i, reg_val;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (hisi_hba->reject_stp_links_msk & BIT(i))
			continue;

		reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL);
		if (!(reg_val & BIT(0))) {
			hisi_sas_phy_write32(hisi_hba, i,
					CON_CONTROL, 0x7);
			/* only one phy is toggled per timer tick */
			break;
		}
	}

	hisi_hba->timer.function = link_timeout_disable_link;
	mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
1131
/*
 * Timer callback (link timeout quirk): disable the first ready phy
 * (per PHY_STATE), then re-arm the timer to run the enable handler
 * 100ms later. Phys currently rejecting STP connections are skipped.
 */
static void link_timeout_disable_link(unsigned long data)
{
	struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
	int i, reg_val;

	reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
	for (i = 0; i < hisi_hba->n_phy && reg_val; i++) {
		if (hisi_hba->reject_stp_links_msk & BIT(i))
			continue;

		if (reg_val & BIT(i)) {
			hisi_sas_phy_write32(hisi_hba, i,
					CON_CONTROL, 0x6);
			/* only one phy is toggled per timer tick */
			break;
		}
	}

	hisi_hba->timer.function = link_timeout_enable_link;
	mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
}
1152
/*
 * set_link_timer_quirk - arm the link enable/disable toggle timer.
 *
 * First expiry runs link_timeout_disable_link after 1s; the two
 * callbacks then keep re-arming each other.
 */
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
	hisi_hba->timer.data = (unsigned long)hisi_hba;
	hisi_hba->timer.function = link_timeout_disable_link;
	hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
	add_timer(&hisi_hba->timer);
}
1160
1161 static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
1162 {
1163 struct device *dev = &hisi_hba->pdev->dev;
1164 int rc;
1165
1166 rc = reset_hw_v2_hw(hisi_hba);
1167 if (rc) {
1168 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
1169 return rc;
1170 }
1171
1172 msleep(100);
1173 init_reg_v2_hw(hisi_hba);
1174
1175 return 0;
1176 }
1177
1178 static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
1179 {
1180 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
1181
1182 cfg |= PHY_CFG_ENA_MSK;
1183 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
1184 }
1185
1186 static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
1187 {
1188 u32 context;
1189
1190 context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
1191 if (context & (1 << phy_no))
1192 return true;
1193
1194 return false;
1195 }
1196
1197 static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
1198 {
1199 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
1200
1201 cfg &= ~PHY_CFG_ENA_MSK;
1202 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
1203 }
1204
/*
 * start_phy_v2_hw - bring up one phy: program its identify frame and
 * optional mode, then enable it.
 */
static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v2_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
	enable_phy_v2_hw(hisi_hba, phy_no);
}
1211
/* stop_phy_v2_hw - stop one phy by disabling it. */
static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	disable_phy_v2_hw(hisi_hba, phy_no);
}
1216
/* stop_phys_v2_hw - stop every phy on the controller. */
static void stop_phys_v2_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		stop_phy_v2_hw(hisi_hba, i);
}
1224
/*
 * phy_hard_reset_v2_hw - hard-reset one phy.
 *
 * Stops the phy; for end devices additionally requests a hard reset on
 * the wire via TXID_AUTO. After a 100ms settle delay the phy is
 * restarted.
 */
static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	stop_phy_v2_hw(hisi_hba, phy_no);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	start_phy_v2_hw(hisi_hba, phy_no);
}
1239
/* start_phys_v2_hw - start every phy on the controller. */
static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		start_phy_v2_hw(hisi_hba, i);
}
1247
/* phys_init_v2_hw - hw-layer phy init hook; just starts all phys. */
static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
{
	start_phys_v2_hw(hisi_hba);
}
1252
/*
 * sl_notify_v2_hw - pulse the SL_CONTROL notify-enable bit on one phy.
 *
 * The bit is set, held for 1ms, then cleared again.
 */
static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
1265
/* phy_get_max_linkrate_v2_hw - this hardware supports up to 12G links. */
static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}
1270
/*
 * phy_set_linkrate_v2_hw - apply requested min/max link rates to a phy.
 *
 * Exactly one of r->minimum_linkrate / r->maximum_linkrate may be
 * SAS_LINK_RATE_UNKNOWN; that side keeps the phy's current value. If
 * both are set (or both unknown) the request is ignored. The low byte
 * of PROG_PHY_LINK_RATE is rebuilt from the rate mask and the phy is
 * hard-reset so the new programming takes effect.
 *
 * NOTE(review): 'min' is normalised but never folded into rate_mask -
 * only 'max' bounds the mask (one bit per rate, every second bit).
 * Presumably the register only encodes rates up to the maximum; confirm
 * against the HW programming guide.
 */
static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
		struct sas_phy_linkrates *r)
{
	u32 prog_phy_link_rate =
		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;
	enum sas_linkrate min, max;
	u32 rate_mask = 0;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	/* normalise rates so 1.5G becomes index 0 */
	min -= SAS_LINK_RATE_1_5_GBPS;
	max -= SAS_LINK_RATE_1_5_GBPS;

	for (i = 0; i <= max; i++)
		rate_mask |= 1 << (i * 2);

	prog_phy_link_rate &= ~0xff;
	prog_phy_link_rate |= rate_mask;

	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			prog_phy_link_rate);

	/* reset the phy so it renegotiates with the new rates */
	phy_hard_reset_v2_hw(hisi_hba, phy_no);
}
1308
/*
 * get_wideport_bitmap_v2_hw - build a bitmap of phys belonging to a port.
 * @hisi_hba: host controller instance
 * @port_id: hardware port number to match
 *
 * Phys 0-7 have their port number in PHY_PORT_NUM_MA (4 bits per phy);
 * on 9-phy hardware, phy 8's port number lives in PORT_STATE instead.
 * Only phys reported ready in PHY_STATE are considered.
 *
 * NOTE(review): phys 0-7 set bits 0-7, but phy 8 sets bit 9 - bit 8 is
 * skipped. Confirm against the HW port-map convention whether this is
 * intentional or an off-by-one.
 */
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
		if (phy_state & 1 << i)
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= 1 << i;

	if (hisi_hba->n_phy == 9) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		if (phy_state & 1 << 8)
			if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			     PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
				bitmap |= 1 << 9;
	}

	return bitmap;
}
1331
/**
 * This function allocates across all queues to load balance.
 * Slots are allocated from queues in a round-robin fashion.
 *
 * The callpath to this function and upto writing the write
 * queue pointer should be safe from interruption.
 *
 * Returns 0 with *q/*s set to the chosen queue and slot, or -EAGAIN if
 * that queue is currently full.
 */
static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
				int *q, int *s)
{
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_dq *dq;
	u32 r, w;
	/* spread devices across delivery queues */
	int queue = dev_id % hisi_hba->queue_count;

	dq = &hisi_hba->dq[queue];
	w = dq->wr_point;
	/* hardware read pointer; queue is full when write would catch it */
	r = hisi_sas_read32_relaxed(hisi_hba,
				DLVRY_Q_0_RD_PTR + (queue * 0x14));
	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
		dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
			 queue, r, w);
		return -EAGAIN;
	}

	*q = queue;
	*s = w;
	return 0;
}
1361
/*
 * start_delivery_v2_hw - publish the just-prepared slot to the hardware.
 *
 * Advances the cached write pointer (note precedence: ++slot binds
 * before %, i.e. (slot + 1) % HISI_SAS_QUEUE_SLOTS) and writes it to
 * the delivery queue's write-pointer register so the controller fetches
 * the new command.
 */
static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
{
	int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
	int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
	struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];

	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
			 dq->wr_point);
}
1372
/*
 * prep_prd_sge_v2_hw - build the slot's SGE (PRD) table from a
 * DMA-mapped scatterlist and point the command header at it.
 *
 * Returns 0 on success, -EINVAL if n_elem exceeds one SGE page, or
 * -ENOMEM if an SGE page cannot be allocated from the pool.
 */
static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot,
			  struct hisi_sas_cmd_hdr *hdr,
			  struct scatterlist *scatter,
			  int n_elem)
{
	struct device *dev = &hisi_hba->pdev->dev;
	struct scatterlist *sg;
	int i;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		return -EINVAL;
	}

	/* GFP_ATOMIC: may be called from non-sleeping context */
	slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
					&slot->sge_page_dma);
	if (!slot->sge_page)
		return -ENOMEM;

	/* copy each mapped segment into a hardware SGE */
	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &slot->sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);

	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);

	return 0;
}
1409
/*
 * prep_smp_v2_hw - prepare the command header for an SMP task.
 *
 * DMA-maps the single-entry request and response scatterlists, then
 * fills in the slot's command header (port, priority, device, frame
 * lengths, IPTT, buffer addresses).
 *
 * Returns 0 on success, -ENOMEM if a mapping fails, or -EINVAL if
 * either buffer length is not dword-aligned. On error, any mappings
 * made so far are undone.
 */
static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	/* hardware requires dword-aligned frame lengths */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2: frame length in dwords (minus CRC), max response size */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}
1479
/*
 * prep_ssp_v2_hw - prepare the command header and IU for an SSP task.
 * @is_tmf: non-zero when this is a task management frame
 * @tmf: TMF parameters; only dereferenced when is_tmf is set
 *
 * Builds the slot's command header (TMFs are sent at high priority and
 * as no-data frames), sets up the PRD table for data-bearing commands,
 * and fills the command IU in the slot's command table (LUN, task
 * attributes and CDB for commands; TMF code and tag for TMFs).
 *
 * Returns 0 on success or a negative error from PRD table setup.
 */
static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		/* derive the hardware transfer direction from the request */
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* frame length in dwords, max response size, SG mode */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);

	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	/* the command IU follows the SSP frame header in the table */
	buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!is_tmf) {
		buf_cmd[9] = task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
				task->ssp_task.cmd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* tag of the task being managed, big-endian */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}
1569
/*
 * sata_done_v2_hw - copy the ending D2H FIS from the slot's status
 * buffer (located after the error record) into the task's ata_task_resp
 * so libsas/libata can inspect it.
 */
static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct dev_to_host_fis *d2h = slot->status_buffer +
				      sizeof(struct hisi_sas_err_record);

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
1583
1584 #define TRANS_TX_ERR 0
1585 #define TRANS_RX_ERR 1
1586 #define DMA_TX_ERR 2
1587 #define SIPC_RX_ERR 3
1588 #define DMA_RX_ERR 4
1589
1590 #define DMA_TX_ERR_OFF 0
1591 #define DMA_TX_ERR_MSK (0xffff << DMA_TX_ERR_OFF)
1592 #define SIPC_RX_ERR_OFF 16
1593 #define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF)
1594
1595 static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
1596 {
1597 const u8 trans_tx_err_code_prio[] = {
1598 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
1599 TRANS_TX_ERR_PHY_NOT_ENABLE,
1600 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
1601 TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION,
1602 TRANS_TX_OPEN_CNX_ERR_BY_OTHER,
1603 RESERVED0,
1604 TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT,
1605 TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY,
1606 TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED,
1607 TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED,
1608 TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION,
1609 TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD,
1610 TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER,
1611 TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED,
1612 TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT,
1613 TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION,
1614 TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED,
1615 TRANS_TX_ERR_WITH_CLOSE_PHYDISALE,
1616 TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT,
1617 TRANS_TX_ERR_WITH_CLOSE_COMINIT,
1618 TRANS_TX_ERR_WITH_BREAK_TIMEOUT,
1619 TRANS_TX_ERR_WITH_BREAK_REQUEST,
1620 TRANS_TX_ERR_WITH_BREAK_RECEVIED,
1621 TRANS_TX_ERR_WITH_CLOSE_TIMEOUT,
1622 TRANS_TX_ERR_WITH_CLOSE_NORMAL,
1623 TRANS_TX_ERR_WITH_NAK_RECEVIED,
1624 TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT,
1625 TRANS_TX_ERR_WITH_CREDIT_TIMEOUT,
1626 TRANS_TX_ERR_WITH_IPTT_CONFLICT,
1627 TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS,
1628 TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT,
1629 };
1630 int index, i;
1631
1632 for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) {
1633 index = trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE;
1634 if (err_msk & (1 << index))
1635 return trans_tx_err_code_prio[i];
1636 }
1637 return -1;
1638 }
1639
1640 static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
1641 {
1642 const u8 trans_rx_err_code_prio[] = {
1643 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
1644 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
1645 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
1646 TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR,
1647 TRANS_RX_ERR_WITH_RXFIS_CRC_ERR,
1648 TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN,
1649 TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP,
1650 TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN,
1651 TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE,
1652 TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT,
1653 TRANS_RX_ERR_WITH_CLOSE_COMINIT,
1654 TRANS_RX_ERR_WITH_BREAK_TIMEOUT,
1655 TRANS_RX_ERR_WITH_BREAK_REQUEST,
1656 TRANS_RX_ERR_WITH_BREAK_RECEVIED,
1657 RESERVED1,
1658 TRANS_RX_ERR_WITH_CLOSE_NORMAL,
1659 TRANS_RX_ERR_WITH_DATA_LEN0,
1660 TRANS_RX_ERR_WITH_BAD_HASH,
1661 TRANS_RX_XRDY_WLEN_ZERO_ERR,
1662 TRANS_RX_SSP_FRM_LEN_ERR,
1663 RESERVED2,
1664 RESERVED3,
1665 RESERVED4,
1666 RESERVED5,
1667 TRANS_RX_ERR_WITH_BAD_FRM_TYPE,
1668 TRANS_RX_SMP_FRM_LEN_ERR,
1669 TRANS_RX_SMP_RESP_TIMEOUT_ERR,
1670 RESERVED6,
1671 RESERVED7,
1672 RESERVED8,
1673 RESERVED9,
1674 TRANS_RX_R_ERR,
1675 };
1676 int index, i;
1677
1678 for (i = 0; i < ARRAY_SIZE(trans_rx_err_code_prio); i++) {
1679 index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE;
1680 if (err_msk & (1 << index))
1681 return trans_rx_err_code_prio[i];
1682 }
1683 return -1;
1684 }
1685
1686 static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
1687 {
1688 const u8 dma_tx_err_code_prio[] = {
1689 DMA_TX_UNEXP_XFER_ERR,
1690 DMA_TX_UNEXP_RETRANS_ERR,
1691 DMA_TX_XFER_LEN_OVERFLOW,
1692 DMA_TX_XFER_OFFSET_ERR,
1693 DMA_TX_RAM_ECC_ERR,
1694 DMA_TX_DIF_LEN_ALIGN_ERR,
1695 DMA_TX_DIF_CRC_ERR,
1696 DMA_TX_DIF_APP_ERR,
1697 DMA_TX_DIF_RPP_ERR,
1698 DMA_TX_DATA_SGL_OVERFLOW,
1699 DMA_TX_DIF_SGL_OVERFLOW,
1700 };
1701 int index, i;
1702
1703 for (i = 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) {
1704 index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE;
1705 err_msk = err_msk & DMA_TX_ERR_MSK;
1706 if (err_msk & (1 << index))
1707 return dma_tx_err_code_prio[i];
1708 }
1709 return -1;
1710 }
1711
1712 static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
1713 {
1714 const u8 sipc_rx_err_code_prio[] = {
1715 SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
1716 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
1717 SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
1718 SIPC_RX_WRSETUP_LEN_ODD_ERR,
1719 SIPC_RX_WRSETUP_LEN_ZERO_ERR,
1720 SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR,
1721 SIPC_RX_NCQ_WRSETUP_OFFSET_ERR,
1722 SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR,
1723 SIPC_RX_SATA_UNEXP_FIS_ERR,
1724 SIPC_RX_WRSETUP_ESTATUS_ERR,
1725 SIPC_RX_DATA_UNDERFLOW_ERR,
1726 };
1727 int index, i;
1728
1729 for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) {
1730 index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE;
1731 err_msk = err_msk & SIPC_RX_ERR_MSK;
1732 if (err_msk & (1 << (index + 0x10)))
1733 return sipc_rx_err_code_prio[i];
1734 }
1735 return -1;
1736 }
1737
1738 static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
1739 {
1740 const u8 dma_rx_err_code_prio[] = {
1741 DMA_RX_UNKNOWN_FRM_ERR,
1742 DMA_RX_DATA_LEN_OVERFLOW,
1743 DMA_RX_DATA_LEN_UNDERFLOW,
1744 DMA_RX_DATA_OFFSET_ERR,
1745 RESERVED10,
1746 DMA_RX_SATA_FRAME_TYPE_ERR,
1747 DMA_RX_RESP_BUF_OVERFLOW,
1748 DMA_RX_UNEXP_RETRANS_RESP_ERR,
1749 DMA_RX_UNEXP_NORM_RESP_ERR,
1750 DMA_RX_UNEXP_RDFRAME_ERR,
1751 DMA_RX_PIO_DATA_LEN_ERR,
1752 DMA_RX_RDSETUP_STATUS_ERR,
1753 DMA_RX_RDSETUP_STATUS_DRQ_ERR,
1754 DMA_RX_RDSETUP_STATUS_BSY_ERR,
1755 DMA_RX_RDSETUP_LEN_ODD_ERR,
1756 DMA_RX_RDSETUP_LEN_ZERO_ERR,
1757 DMA_RX_RDSETUP_LEN_OVER_ERR,
1758 DMA_RX_RDSETUP_OFFSET_ERR,
1759 DMA_RX_RDSETUP_ACTIVE_ERR,
1760 DMA_RX_RDSETUP_ESTATUS_ERR,
1761 DMA_RX_RAM_ECC_ERR,
1762 DMA_RX_DIF_CRC_ERR,
1763 DMA_RX_DIF_APP_ERR,
1764 DMA_RX_DIF_RPP_ERR,
1765 DMA_RX_DATA_SGL_OVERFLOW,
1766 DMA_RX_DIF_SGL_OVERFLOW,
1767 };
1768 int index, i;
1769
1770 for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) {
1771 index = dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE;
1772 if (err_msk & (1 << index))
1773 return dma_rx_err_code_prio[i];
1774 }
1775 return -1;
1776 }
1777
1778 /* by default, task resp is complete */
1779 static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
1780 struct sas_task *task,
1781 struct hisi_sas_slot *slot,
1782 int err_phase)
1783 {
1784 struct task_status_struct *ts = &task->task_status;
1785 struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
1786 u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
1787 u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
1788 u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
1789 u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
1790 u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
1791 int error = -1;
1792
1793 if (err_phase == 1) {
1794 /* error in TX phase, the priority of error is: DW2 > DW0 */
1795 error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type);
1796 if (error == -1)
1797 error = parse_trans_tx_err_code_v2_hw(
1798 trans_tx_fail_type);
1799 } else if (err_phase == 2) {
1800 /* error in RX phase, the priority is: DW1 > DW3 > DW2 */
1801 error = parse_trans_rx_err_code_v2_hw(
1802 trans_rx_fail_type);
1803 if (error == -1) {
1804 error = parse_dma_rx_err_code_v2_hw(
1805 dma_rx_err_type);
1806 if (error == -1)
1807 error = parse_sipc_rx_err_code_v2_hw(
1808 sipc_rx_err_type);
1809 }
1810 }
1811
1812 switch (task->task_proto) {
1813 case SAS_PROTOCOL_SSP:
1814 {
1815 switch (error) {
1816 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
1817 {
1818 ts->stat = SAS_OPEN_REJECT;
1819 ts->open_rej_reason = SAS_OREJ_NO_DEST;
1820 break;
1821 }
1822 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
1823 {
1824 ts->stat = SAS_OPEN_REJECT;
1825 ts->open_rej_reason = SAS_OREJ_EPROTO;
1826 break;
1827 }
1828 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
1829 {
1830 ts->stat = SAS_OPEN_REJECT;
1831 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1832 break;
1833 }
1834 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
1835 {
1836 ts->stat = SAS_OPEN_REJECT;
1837 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1838 break;
1839 }
1840 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
1841 {
1842 ts->stat = SAS_OPEN_REJECT;
1843 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1844 break;
1845 }
1846 case DMA_RX_UNEXP_NORM_RESP_ERR:
1847 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
1848 case DMA_RX_RESP_BUF_OVERFLOW:
1849 {
1850 ts->stat = SAS_OPEN_REJECT;
1851 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1852 break;
1853 }
1854 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
1855 {
1856 /* not sure */
1857 ts->stat = SAS_DEV_NO_RESPONSE;
1858 break;
1859 }
1860 case DMA_RX_DATA_LEN_OVERFLOW:
1861 {
1862 ts->stat = SAS_DATA_OVERRUN;
1863 ts->residual = 0;
1864 break;
1865 }
1866 case DMA_RX_DATA_LEN_UNDERFLOW:
1867 {
1868 ts->residual = dma_rx_err_type;
1869 ts->stat = SAS_DATA_UNDERRUN;
1870 break;
1871 }
1872 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
1873 case TRANS_TX_ERR_PHY_NOT_ENABLE:
1874 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
1875 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
1876 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
1877 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
1878 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
1879 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
1880 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
1881 case TRANS_TX_ERR_WITH_BREAK_REQUEST:
1882 case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
1883 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
1884 case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
1885 case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE:
1886 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
1887 case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
1888 case TRANS_TX_ERR_WITH_NAK_RECEVIED:
1889 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
1890 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
1891 case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
1892 case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
1893 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
1894 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
1895 case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN:
1896 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
1897 case TRANS_RX_ERR_WITH_BREAK_REQUEST:
1898 case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
1899 case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
1900 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
1901 case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
1902 case TRANS_TX_ERR_FRAME_TXED:
1903 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
1904 case TRANS_RX_ERR_WITH_DATA_LEN0:
1905 case TRANS_RX_ERR_WITH_BAD_HASH:
1906 case TRANS_RX_XRDY_WLEN_ZERO_ERR:
1907 case TRANS_RX_SSP_FRM_LEN_ERR:
1908 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
1909 case DMA_TX_DATA_SGL_OVERFLOW:
1910 case DMA_TX_UNEXP_XFER_ERR:
1911 case DMA_TX_UNEXP_RETRANS_ERR:
1912 case DMA_TX_XFER_LEN_OVERFLOW:
1913 case DMA_TX_XFER_OFFSET_ERR:
1914 case SIPC_RX_DATA_UNDERFLOW_ERR:
1915 case DMA_RX_DATA_SGL_OVERFLOW:
1916 case DMA_RX_DATA_OFFSET_ERR:
1917 case DMA_RX_RDSETUP_LEN_ODD_ERR:
1918 case DMA_RX_RDSETUP_LEN_ZERO_ERR:
1919 case DMA_RX_RDSETUP_LEN_OVER_ERR:
1920 case DMA_RX_SATA_FRAME_TYPE_ERR:
1921 case DMA_RX_UNKNOWN_FRM_ERR:
1922 {
1923 /* This will request a retry */
1924 ts->stat = SAS_QUEUE_FULL;
1925 slot->abort = 1;
1926 break;
1927 }
1928 default:
1929 break;
1930 }
1931 }
1932 break;
1933 case SAS_PROTOCOL_SMP:
1934 ts->stat = SAM_STAT_CHECK_CONDITION;
1935 break;
1936
1937 case SAS_PROTOCOL_SATA:
1938 case SAS_PROTOCOL_STP:
1939 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1940 {
1941 switch (error) {
1942 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
1943 {
1944 ts->stat = SAS_OPEN_REJECT;
1945 ts->open_rej_reason = SAS_OREJ_NO_DEST;
1946 break;
1947 }
1948 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
1949 {
1950 ts->resp = SAS_TASK_UNDELIVERED;
1951 ts->stat = SAS_DEV_NO_RESPONSE;
1952 break;
1953 }
1954 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
1955 {
1956 ts->stat = SAS_OPEN_REJECT;
1957 ts->open_rej_reason = SAS_OREJ_EPROTO;
1958 break;
1959 }
1960 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
1961 {
1962 ts->stat = SAS_OPEN_REJECT;
1963 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1964 break;
1965 }
1966 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
1967 {
1968 ts->stat = SAS_OPEN_REJECT;
1969 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1970 break;
1971 }
1972 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
1973 {
1974 ts->stat = SAS_OPEN_REJECT;
1975 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1976 break;
1977 }
1978 case DMA_RX_RESP_BUF_OVERFLOW:
1979 case DMA_RX_UNEXP_NORM_RESP_ERR:
1980 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
1981 {
1982 ts->stat = SAS_OPEN_REJECT;
1983 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1984 break;
1985 }
1986 case DMA_RX_DATA_LEN_OVERFLOW:
1987 {
1988 ts->stat = SAS_DATA_OVERRUN;
1989 ts->residual = 0;
1990 break;
1991 }
1992 case DMA_RX_DATA_LEN_UNDERFLOW:
1993 {
1994 ts->residual = dma_rx_err_type;
1995 ts->stat = SAS_DATA_UNDERRUN;
1996 break;
1997 }
1998 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
1999 case TRANS_TX_ERR_PHY_NOT_ENABLE:
2000 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
2001 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
2002 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
2003 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
2004 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
2005 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
2006 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
2007 case TRANS_TX_ERR_WITH_BREAK_REQUEST:
2008 case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
2009 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
2010 case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
2011 case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE:
2012 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
2013 case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
2014 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
2015 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
2016 case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS:
2017 case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
2018 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
2019 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
2020 case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
2021 case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
2022 case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
2023 case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
2024 case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN:
2025 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
2026 case TRANS_RX_ERR_WITH_BREAK_REQUEST:
2027 case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
2028 case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
2029 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
2030 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
2031 case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
2032 case TRANS_RX_ERR_WITH_DATA_LEN0:
2033 case TRANS_RX_ERR_WITH_BAD_HASH:
2034 case TRANS_RX_XRDY_WLEN_ZERO_ERR:
2035 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
2036 case DMA_TX_DATA_SGL_OVERFLOW:
2037 case DMA_TX_UNEXP_XFER_ERR:
2038 case DMA_TX_UNEXP_RETRANS_ERR:
2039 case DMA_TX_XFER_LEN_OVERFLOW:
2040 case DMA_TX_XFER_OFFSET_ERR:
2041 case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
2042 case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
2043 case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
2044 case SIPC_RX_WRSETUP_LEN_ODD_ERR:
2045 case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
2046 case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
2047 case SIPC_RX_SATA_UNEXP_FIS_ERR:
2048 case DMA_RX_DATA_SGL_OVERFLOW:
2049 case DMA_RX_DATA_OFFSET_ERR:
2050 case DMA_RX_SATA_FRAME_TYPE_ERR:
2051 case DMA_RX_UNEXP_RDFRAME_ERR:
2052 case DMA_RX_PIO_DATA_LEN_ERR:
2053 case DMA_RX_RDSETUP_STATUS_ERR:
2054 case DMA_RX_RDSETUP_STATUS_DRQ_ERR:
2055 case DMA_RX_RDSETUP_STATUS_BSY_ERR:
2056 case DMA_RX_RDSETUP_LEN_ODD_ERR:
2057 case DMA_RX_RDSETUP_LEN_ZERO_ERR:
2058 case DMA_RX_RDSETUP_LEN_OVER_ERR:
2059 case DMA_RX_RDSETUP_OFFSET_ERR:
2060 case DMA_RX_RDSETUP_ACTIVE_ERR:
2061 case DMA_RX_RDSETUP_ESTATUS_ERR:
2062 case DMA_RX_UNKNOWN_FRM_ERR:
2063 case TRANS_RX_SSP_FRM_LEN_ERR:
2064 case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
2065 {
2066 slot->abort = 1;
2067 ts->stat = SAS_PHY_DOWN;
2068 break;
2069 }
2070 default:
2071 {
2072 ts->stat = SAS_PROTO_RESPONSE;
2073 break;
2074 }
2075 }
2076 sata_done_v2_hw(hisi_hba, task, slot);
2077 }
2078 break;
2079 default:
2080 break;
2081 }
2082 }
2083
/*
 * Translate the hardware completion header for @slot into libsas task
 * status, reclaim the slot and invoke the task's completion callback.
 *
 * Returns the status reported to libsas, -EINVAL on a torn-down task,
 * or -1 when the task had already been aborted by the upper layer.
 */
static int
slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = &hisi_hba->pdev->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	enum exec_status sts;
	/* Completion header written by the hw into this slot's cmplt queue */
	struct hisi_sas_complete_v2_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v2_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	unsigned long flags;
	int aborted;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	sas_dev = device->lldd_dev;

	/* Atomically sample the aborted flag and clear the in-flight state */
	spin_lock_irqsave(&task->task_state_lock, flags);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		/* Upper layer aborted it already: just reclaim the slot */
		ts->stat = SAS_ABORTED_TASK;
		hisi_sas_slot_task_free(hisi_hba, task, slot);
		return -1;
	}

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has no device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* Use SAS+TMF status codes */
	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
			>> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this io has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/* abort single io, controller don't find
		 * the io need to abort
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* Error flagged and no response frame transferred: decode it */
	if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
		(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
		u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
				>> CMPLT_HDR_ERR_PHASE_OFF;

		/* Analyse error happens on which phase TX or RX */
		if (ERR_ON_TX_PHASE(err_phase))
			slot_err_v2_hw(hisi_hba, task, slot, 1);
		else if (ERR_ON_RX_PHASE(err_phase))
			slot_err_v2_hw(hisi_hba, task, slot, 2);

		/* slot_err decided this IO must be retried via abort:
		 * return without freeing the slot or calling task_done
		 */
		if (unlikely(slot->abort))
			return ts->stat;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		/* Response IU sits after the error record in the buffer */
		struct ssp_response_iu *iu = slot->status_buffer +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP:
	{
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		/* Copy the SMP response out of the slot's status buffer */
		memcpy(to + sg_resp->offset,
		       slot->status_buffer +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		ts->stat = SAM_STAT_GOOD;
		sata_done_v2_hw(hisi_hba, task, slot);
		break;
	}
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_err(dev, "slot complete: port %d has removed\n",
			slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);
	/* ts may be invalid after task_done; latch the status first */
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}
2227
/*
 * Map an ATA command opcode to the SATA frame/protocol type
 * (SATA_PROTOCOL_*) encoded into the v2 hw command header.
 *
 * Opcodes not listed explicitly fall back on the data direction:
 * no data phase -> NONDATA, otherwise assume PIO.
 */
static u8 get_ata_protocol(u8 cmd, int direction)
{
	switch (cmd) {
	/* NCQ (first-party DMA) commands */
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
	return SATA_PROTOCOL_FPDMA;

	/* PIO data-in/data-out commands */
	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return SATA_PROTOCOL_PIO;

	/* Legacy (non-NCQ) DMA commands */
	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
		return SATA_PROTOCOL_DMA;

	/* Commands with no data phase */
	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
		return SATA_PROTOCOL_NONDATA;
	default:
		/* NOTE(review): unlisted opcodes with a data phase are
		 * assumed to be PIO — confirm for any newly-added commands
		 */
		if (direction == DMA_NONE)
			return SATA_PROTOCOL_NONDATA;
		return SATA_PROTOCOL_PIO;
	}
}
2285
2286 static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
2287 {
2288 struct ata_queued_cmd *qc = task->uldd_task;
2289
2290 if (qc) {
2291 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
2292 qc->tf.command == ATA_CMD_FPDMA_READ) {
2293 *tag = qc->tag;
2294 return 1;
2295 }
2296 }
2297 return 0;
2298 }
2299
/*
 * Build the v2 hw delivery-queue command header and command table (the
 * host-to-device FIS) for an ATA/STP task in @slot.
 *
 * Returns 0 on success, or the error from PRD/SGE setup.
 */
static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, rc = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	/* cmd 3 when behind an expander, 4 when directly attached
	 * (presumably STP vs native SATA framing — confirm vs hw spec)
	 */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);

	/* dw1: transfer direction */
	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* Flag a software-reset FIS so the hw treats it specially */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
		(task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
		/* NCQ tag lives in FIS sector_count bits 7:3 */
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3: IPTT of this slot */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}


	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	buf_cmd = slot->command_table;

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}
2379
2380 static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
2381 struct hisi_sas_slot *slot,
2382 int device_id, int abort_flag, int tag_to_abort)
2383 {
2384 struct sas_task *task = slot->task;
2385 struct domain_device *dev = task->dev;
2386 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
2387 struct hisi_sas_port *port = slot->port;
2388
2389 /* dw0 */
2390 hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
2391 (port->id << CMD_HDR_PORT_OFF) |
2392 ((dev_is_sata(dev) ? 1:0) <<
2393 CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
2394 (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
2395
2396 /* dw1 */
2397 hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
2398
2399 /* dw7 */
2400 hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
2401 hdr->transfer_tags = cpu_to_le32(slot->idx);
2402
2403 return 0;
2404 }
2405
/*
 * Handle a phy-up (SL_PHY_ENABLE) event on @phy_no: read the negotiated
 * port id and link rate, capture the received identify address frame,
 * and queue the libsas phy-up notification work.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE when the hw reports an invalid
 * port id (0xf).
 */
static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res = IRQ_HANDLED;
	u32 port_id, link_rate, hard_phy_linkrate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = &hisi_hba->pdev->dev;
	u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
	struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;

	/* Mask further phy-enable interrupts while processing this one */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	/* SATA-attached phys are handled elsewhere; only ack and return */
	if (is_sata_phy_v2_hw(hisi_hba, phy_no))
		goto end;

	if (phy_no == 8) {
		/* phy 8's port num and rate live in PORT_STATE */
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			  PORT_STATE_PHY8_PORT_NUM_OFF;
		link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
			    PORT_STATE_PHY8_CONN_RATE_OFF;
	} else {
		/* phys 0-7: 4 bits per phy in the shared registers */
		port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
		port_id = (port_id >> (4 * phy_no)) & 0xf;
		link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
		link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	}

	/* 0xf means the hw has not assigned this phy to a port */
	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* Latch the 6 dwords of the received identify address frame,
	 * byte-swapped into the frame_rcvd buffer for libsas
	 */
	for (i = 0; i < 6; i++) {
		u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					RX_IDAF_DWORD0 + (i * 4));
		frame_rcvd[i] = __swab32(idaf);
	}

	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	/* min/max linkrate packed in nibbles of HARD_PHY_LINKRATE */
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;

	sas_phy->oob_mode = SAS_OOB_MODE;
	memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
	dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
	phy->port_id = port_id;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	phy->phy_type |= PORT_TYPE_SAS;
	phy->phy_attached = 1;
	phy->identify.device_type = id->dev_type;
	phy->frame_rcvd_size =	sizeof(struct sas_identify_frame);
	if (phy->identify.device_type == SAS_END_DEVICE)
		phy->identify.target_port_protocols =
			SAS_PROTOCOL_SSP;
	else if (phy->identify.device_type != SAS_PHY_UNUSED) {
		phy->identify.target_port_protocols =
			SAS_PROTOCOL_SMP;
		/* Expander attached: arm the wideport link-timer quirk */
		if (!timer_pending(&hisi_hba->timer))
			set_link_timer_quirk(hisi_hba);
	}
	queue_work(hisi_hba->wq, &phy->phyup_ws);

 end:
	/* Ack the phy-enable interrupt, then unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}
2480
2481 static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba)
2482 {
2483 u32 port_state;
2484
2485 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
2486 if (port_state & 0x1ff)
2487 return true;
2488
2489 return false;
2490 }
2491
/*
 * Handle a phy-down (channel NOT_RDY) event on @phy_no: notify libsas,
 * clear the phy's port-affiliation control bit, and stop the link timer
 * when the last wide port is gone.
 *
 * Always returns IRQ_HANDLED.
 */
static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	u32 phy_state, sl_ctrl, txid_auto;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct hisi_sas_port *port = phy->port;

	/* Mask further not-ready interrupts while processing this one */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl & ~SL_CONTROL_CTA_MSK);
	/* Port has no phys left and no wideports remain anywhere:
	 * cancel the pending wideport link timer
	 */
	if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id))
		if (!check_any_wideports_v2_hw(hisi_hba) &&
				timer_pending(&hisi_hba->timer))
			del_timer(&hisi_hba->timer);

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | TXID_AUTO_CT3_MSK);

	/* Ack the not-ready interrupt, then unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}
2520
/*
 * Phy up/down interrupt handler: walk every phy flagged in the fanout
 * register and, from the phy's CHL_INT0 bits (disambiguated by PHY_STATE
 * when both bits are set), dispatch to phy_up_v2_hw()/phy_down_v2_hw().
 */
static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
		   >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
					     CHL_INT0);

			switch (reg_value & (CHL_INT0_NOT_RDY_MSK |
					CHL_INT0_SL_PHY_ENABLE_MSK)) {

			case CHL_INT0_SL_PHY_ENABLE_MSK:
				/* phy up */
				if (phy_up_v2_hw(phy_no, hisi_hba) ==
				    IRQ_NONE)
					return IRQ_NONE;
				break;

			case CHL_INT0_NOT_RDY_MSK:
				/* phy down */
				if (phy_down_v2_hw(phy_no, hisi_hba) ==
				    IRQ_NONE)
					return IRQ_NONE;
				break;

			case (CHL_INT0_NOT_RDY_MSK |
					CHL_INT0_SL_PHY_ENABLE_MSK):
				/* Both events pending: the current
				 * PHY_STATE bit decides which happened
				 * most recently
				 */
				reg_value = hisi_sas_read32(hisi_hba,
						PHY_STATE);
				if (reg_value & BIT(phy_no)) {
					/* phy up */
					if (phy_up_v2_hw(phy_no, hisi_hba) ==
					    IRQ_NONE)
						return IRQ_NONE;
				} else {
					/* phy down */
					if (phy_down_v2_hw(phy_no, hisi_hba) ==
					    IRQ_NONE)
						return IRQ_NONE;
				}
				break;

			default:
				break;
			}

		}
		irq_msk >>= 1;
		phy_no++;
	}

	return IRQ_HANDLED;
}
2579
2580 static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
2581 {
2582 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2583 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2584 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
2585 u32 bcast_status;
2586
2587 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
2588 bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
2589 if (bcast_status & RX_BCAST_CHG_MSK)
2590 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2591 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
2592 CHL_INT0_SL_RX_BCST_ACK_MSK);
2593 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
2594 }
2595
2596 static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
2597 {
2598 struct hisi_hba *hisi_hba = p;
2599 struct device *dev = &hisi_hba->pdev->dev;
2600 u32 ent_msk, ent_tmp, irq_msk;
2601 int phy_no = 0;
2602
2603 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
2604 ent_tmp = ent_msk;
2605 ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
2606 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
2607
2608 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
2609 HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
2610
2611 while (irq_msk) {
2612 if (irq_msk & (1 << phy_no)) {
2613 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
2614 CHL_INT0);
2615 u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
2616 CHL_INT1);
2617 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
2618 CHL_INT2);
2619
2620 if (irq_value1) {
2621 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
2622 CHL_INT1_DMAC_TX_ECC_ERR_MSK))
2623 panic("%s: DMAC RX/TX ecc bad error!\
2624 (0x%x)",
2625 dev_name(dev), irq_value1);
2626
2627 hisi_sas_phy_write32(hisi_hba, phy_no,
2628 CHL_INT1, irq_value1);
2629 }
2630
2631 if (irq_value2)
2632 hisi_sas_phy_write32(hisi_hba, phy_no,
2633 CHL_INT2, irq_value2);
2634
2635
2636 if (irq_value0) {
2637 if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
2638 phy_bcast_v2_hw(phy_no, hisi_hba);
2639
2640 hisi_sas_phy_write32(hisi_hba, phy_no,
2641 CHL_INT0, irq_value0
2642 & (~CHL_INT0_HOTPLUG_TOUT_MSK)
2643 & (~CHL_INT0_SL_PHY_ENABLE_MSK)
2644 & (~CHL_INT0_NOT_RDY_MSK));
2645 }
2646 }
2647 irq_msk &= ~(1 << phy_no);
2648 phy_no++;
2649 }
2650
2651 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
2652
2653 return IRQ_HANDLED;
2654 }
2655
2656 static void
2657 one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
2658 {
2659 struct device *dev = &hisi_hba->pdev->dev;
2660 u32 reg_val;
2661
2662 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
2663 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
2664 dev_warn(dev, "hgc_dqe_acc1b_intr found: \
2665 Ram address is 0x%08X\n",
2666 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
2667 HGC_DQE_ECC_1B_ADDR_OFF);
2668 }
2669
2670 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
2671 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
2672 dev_warn(dev, "hgc_iost_acc1b_intr found: \
2673 Ram address is 0x%08X\n",
2674 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
2675 HGC_IOST_ECC_1B_ADDR_OFF);
2676 }
2677
2678 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
2679 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
2680 dev_warn(dev, "hgc_itct_acc1b_intr found: \
2681 Ram address is 0x%08X\n",
2682 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
2683 HGC_ITCT_ECC_1B_ADDR_OFF);
2684 }
2685
2686 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
2687 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2688 dev_warn(dev, "hgc_iostl_acc1b_intr found: \
2689 memory address is 0x%08X\n",
2690 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
2691 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
2692 }
2693
2694 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
2695 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2696 dev_warn(dev, "hgc_itctl_acc1b_intr found: \
2697 memory address is 0x%08X\n",
2698 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
2699 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
2700 }
2701
2702 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
2703 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
2704 dev_warn(dev, "hgc_cqe_acc1b_intr found: \
2705 Ram address is 0x%08X\n",
2706 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
2707 HGC_CQE_ECC_1B_ADDR_OFF);
2708 }
2709
2710 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
2711 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2712 dev_warn(dev, "rxm_mem0_acc1b_intr found: \
2713 memory address is 0x%08X\n",
2714 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
2715 HGC_RXM_DFX_STATUS14_MEM0_OFF);
2716 }
2717
2718 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
2719 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2720 dev_warn(dev, "rxm_mem1_acc1b_intr found: \
2721 memory address is 0x%08X\n",
2722 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
2723 HGC_RXM_DFX_STATUS14_MEM1_OFF);
2724 }
2725
2726 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
2727 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2728 dev_warn(dev, "rxm_mem2_acc1b_intr found: \
2729 memory address is 0x%08X\n",
2730 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
2731 HGC_RXM_DFX_STATUS14_MEM2_OFF);
2732 }
2733
2734 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
2735 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
2736 dev_warn(dev, "rxm_mem3_acc1b_intr found: \
2737 memory address is 0x%08X\n",
2738 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
2739 HGC_RXM_DFX_STATUS15_MEM3_OFF);
2740 }
2741
2742 }
2743
2744 static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
2745 u32 irq_value)
2746 {
2747 u32 reg_val;
2748 struct device *dev = &hisi_hba->pdev->dev;
2749
2750 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
2751 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
2752 panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
2753 Ram address is 0x%08X\n",
2754 dev_name(dev), irq_value,
2755 (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
2756 HGC_DQE_ECC_MB_ADDR_OFF);
2757 }
2758
2759 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
2760 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
2761 panic("%s: hgc_iost_accbad_intr (0x%x) found: \
2762 Ram address is 0x%08X\n",
2763 dev_name(dev), irq_value,
2764 (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
2765 HGC_IOST_ECC_MB_ADDR_OFF);
2766 }
2767
2768 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
2769 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
2770 panic("%s: hgc_itct_accbad_intr (0x%x) found: \
2771 Ram address is 0x%08X\n",
2772 dev_name(dev), irq_value,
2773 (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
2774 HGC_ITCT_ECC_MB_ADDR_OFF);
2775 }
2776
2777 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
2778 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2779 panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
2780 memory address is 0x%08X\n",
2781 dev_name(dev), irq_value,
2782 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
2783 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
2784 }
2785
2786 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
2787 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
2788 panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
2789 memory address is 0x%08X\n",
2790 dev_name(dev), irq_value,
2791 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
2792 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
2793 }
2794
2795 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
2796 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
2797 panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
2798 Ram address is 0x%08X\n",
2799 dev_name(dev), irq_value,
2800 (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
2801 HGC_CQE_ECC_MB_ADDR_OFF);
2802 }
2803
2804 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
2805 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2806 panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
2807 memory address is 0x%08X\n",
2808 dev_name(dev), irq_value,
2809 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
2810 HGC_RXM_DFX_STATUS14_MEM0_OFF);
2811 }
2812
2813 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
2814 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2815 panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
2816 memory address is 0x%08X\n",
2817 dev_name(dev), irq_value,
2818 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
2819 HGC_RXM_DFX_STATUS14_MEM1_OFF);
2820 }
2821
2822 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
2823 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
2824 panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
2825 memory address is 0x%08X\n",
2826 dev_name(dev), irq_value,
2827 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
2828 HGC_RXM_DFX_STATUS14_MEM2_OFF);
2829 }
2830
2831 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
2832 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
2833 panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
2834 memory address is 0x%08X\n",
2835 dev_name(dev), irq_value,
2836 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
2837 HGC_RXM_DFX_STATUS15_MEM3_OFF);
2838 }
2839
2840 }
2841
2842 static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
2843 {
2844 struct hisi_hba *hisi_hba = p;
2845 u32 irq_value, irq_msk;
2846
2847 irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
2848 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
2849
2850 irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
2851 if (irq_value) {
2852 one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
2853 multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
2854 }
2855
2856 hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
2857 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
2858
2859 return IRQ_HANDLED;
2860 }
2861
/* Names for the AXI error sources reported in bits [AXI_ERR_NR-1:0] of
 * HGC_AXI_FIFO_ERR_INFO; indexed by bit position.
 */
#define AXI_ERR_NR 8
static const char axi_err_info[AXI_ERR_NR][32] = {
	"IOST_AXI_W_ERR",
	"IOST_AXI_R_ERR",
	"ITCT_AXI_W_ERR",
	"ITCT_AXI_R_ERR",
	"SATA_AXI_W_ERR",
	"SATA_AXI_R_ERR",
	"DQE_AXI_R_ERR",
	"CQE_AXI_W_ERR"
};

/* Names for the FIFO error sources reported in the bits just above the
 * AXI ones (bit AXI_ERR_NR + index) of HGC_AXI_FIFO_ERR_INFO.
 */
#define FIFO_ERR_NR 5
static const char fifo_err_info[FIFO_ERR_NR][32] = {
	"CQE_WINFO_FIFO",
	"CQE_MSG_FIFIO",
	"GETDQE_FIFO",
	"CMDP_FIFO",
	"AWTCTRL_FIFO"
};
2882
/*
 * Interrupt handler for fatal controller errors reported via ENT_INT_SRC3
 * (write/read pointer-depth errors, IPTT/slot mismatch, AXI and FIFO
 * errors, list-manager errors).  Each detected source is acked in the
 * hardware and then the system is panicked, since the controller state
 * is considered unrecoverable.
 */
static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_value, irq_msk, err_value;
	struct device *dev = &hisi_hba->pdev->dev;

	/* Mask all ENT_INT_SRC3 sources except bit 0 while handling. */
	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);

	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	if (irq_value) {
		if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					1 << ENT_INT_SRC3_WP_DEPTH_OFF);
			panic("%s: write pointer and depth error (0x%x) \
found!\n",
			      dev_name(dev), irq_value);
		}

		if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					 1 <<
					 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
			panic("%s: iptt no match slot error (0x%x) found!\n",
			      dev_name(dev), irq_value);
		}

		/* NOTE(review): this source is not acked before the panic,
		 * unlike the others - moot since panic() never returns. */
		if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
			panic("%s: read pointer and depth error (0x%x) \
found!\n",
			      dev_name(dev), irq_value);

		if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
			int i;

			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					 1 << ENT_INT_SRC3_AXI_OFF);
			err_value = hisi_sas_read32(hisi_hba,
						    HGC_AXI_FIFO_ERR_INFO);

			/* AXI errors occupy bits 0..AXI_ERR_NR-1 */
			for (i = 0; i < AXI_ERR_NR; i++) {
				if (err_value & BIT(i))
					panic("%s: %s (0x%x) found!\n",
					      dev_name(dev),
					      axi_err_info[i], irq_value);
			}
		}

		if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
			int i;

			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					 1 << ENT_INT_SRC3_FIFO_OFF);
			err_value = hisi_sas_read32(hisi_hba,
						    HGC_AXI_FIFO_ERR_INFO);

			/* FIFO errors sit above the AXI bits */
			for (i = 0; i < FIFO_ERR_NR; i++) {
				if (err_value & BIT(AXI_ERR_NR + i))
					panic("%s: %s (0x%x) found!\n",
					      dev_name(dev),
					      fifo_err_info[i], irq_value);
			}

		}

		if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					1 << ENT_INT_SRC3_LM_OFF);
			panic("%s: LM add/fetch list error (0x%x) found!\n",
			      dev_name(dev), irq_value);
		}

		if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					1 << ENT_INT_SRC3_ABT_OFF);
			panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
			      dev_name(dev), irq_value);
		}
	}

	/* Restore the original interrupt mask. */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}
2967
/*
 * Tasklet that drains one hardware completion queue: walks entries from
 * the cached read pointer up to the hardware write pointer, completes
 * the slot for each entry (expanding NCQ completions into one slot per
 * set ACT bit), then publishes the new read pointer back to hardware.
 * Runs under hisi_hba->lock.
 */
static void cq_tasklet_v2_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_itct *itct;
	struct hisi_sas_complete_v2_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point, dev_id;
	int queue = cq->id;

	/* Part of the SoC STP-link workaround: retry accepting STP links
	 * that were previously being rejected. */
	if (unlikely(hisi_hba->reject_stp_links_msk))
		phys_try_accept_stp_links_v2_hw(hisi_hba);

	complete_queue = hisi_hba->complete_hdr[queue];

	spin_lock(&hisi_hba->lock);
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v2_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				/* ffs() is 1-based; convert to bit index */
				ncq_tag_count -= 1;
				/* 12-bit IPTTs are packed 5 per 64-bit word */
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v2_hw(hisi_hba, slot);

				/* Clear this tag, then look for the next */
				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			/* Non-NCQ: the IPTT is carried in the entry itself */
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v2_hw(hisi_hba, slot);
		}

		/* Completion queue is a ring of HISI_SAS_QUEUE_SLOTS entries */
		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	spin_unlock(&hisi_hba->lock);
}
3035
3036 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
3037 {
3038 struct hisi_sas_cq *cq = p;
3039 struct hisi_hba *hisi_hba = cq->hisi_hba;
3040 int queue = cq->id;
3041
3042 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
3043
3044 tasklet_schedule(&cq->tasklet);
3045
3046 return IRQ_HANDLED;
3047 }
3048
/*
 * Per-phy SATA interrupt: fires when an initial D2H FIS has been
 * received for a direct-attached SATA device.  Reads the stored FIS,
 * determines the port id and negotiated link rate, fabricates a SAS
 * address for the device, and queues the generic phy-up work.
 * Returns IRQ_NONE if no FIS was actually received, the FIS carries an
 * ATA error, or the port id is invalid.
 */
static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_phy *phy = p;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_initial_fis *initial_fis;
	struct dev_to_host_fis *fis;
	u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
	irqreturn_t res = IRQ_HANDLED;
	u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
	int phy_no, offset;

	phy_no = sas_phy->id;
	initial_fis = &hisi_hba->initial_fis[phy_no];
	fis = &initial_fis->fis;

	/* Sources are grouped 4 phys per 32-bit register. */
	offset = 4 * (phy_no / 4);
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset,
			 ent_msk | 1 << ((phy_no % 4) * 8));

	ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset);
	/* Keep only this phy's D2H bit, so the ack at 'end' does not
	 * clear interrupts belonging to the other phys in the group. */
	ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF *
			     (phy_no % 4)));
	ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
	if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
		dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* check ERR bit of Status Register */
	if (fis->status & ATA_ERR) {
		dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
			 fis->status);
		/* Bounce the phy to force a fresh link negotiation. */
		disable_phy_v2_hw(hisi_hba, phy_no);
		enable_phy_v2_hw(hisi_hba, phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* Phy 8's port/rate fields live in PORT_STATE, not in the
	 * packed per-phy registers used for phys 0-7. */
	if (unlikely(phy_no == 8)) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			  PORT_STATE_PHY8_PORT_NUM_OFF;
		link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
			    PORT_STATE_PHY8_CONN_RATE_OFF;
	} else {
		port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
		port_id = (port_id >> (4 * phy_no)) & 0xf;
		link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
		link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	}

	/* 0xf means the phy is not assigned to any port. */
	if (port_id == 0xf) {
		dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;

	sas_phy->oob_mode = SATA_OOB_MODE;
	/* Make up some unique SAS address */
	attached_sas_addr[0] = 0x50;
	attached_sas_addr[7] = phy_no;
	memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
	memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
	dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	phy->port_id = port_id;
	phy->phy_type |= PORT_TYPE_SATA;
	phy->phy_attached = 1;
	phy->identify.device_type = SAS_SATA_DEV;
	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	/* Ack only this phy's source bit, then restore the mask. */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);

	return res;
}
3139
/* Handlers for the per-controller phy interrupt lines (up/down, channel). */
static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
	int_phy_updown_v2_hw,
	int_chnl_int_v2_hw,
};
3144
/* Handlers for the fatal-error interrupt lines (ECC, AXI/FIFO). */
static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
	fatal_ecc_int_v2_hw,
	fatal_axi_int_v2_hw
};
3149
3150 /**
3151 * There is a limitation in the hip06 chipset that we need
3152 * to map in all mbigen interrupts, even if they are not used.
3153 */
3154 static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
3155 {
3156 struct platform_device *pdev = hisi_hba->pdev;
3157 struct device *dev = &pdev->dev;
3158 int i, irq, rc, irq_map[128];
3159
3160
3161 for (i = 0; i < 128; i++)
3162 irq_map[i] = platform_get_irq(pdev, i);
3163
3164 for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
3165 int idx = i;
3166
3167 irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
3168 if (!irq) {
3169 dev_err(dev, "irq init: fail map phy interrupt %d\n",
3170 idx);
3171 return -ENOENT;
3172 }
3173
3174 rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
3175 DRV_NAME " phy", hisi_hba);
3176 if (rc) {
3177 dev_err(dev, "irq init: could not request "
3178 "phy interrupt %d, rc=%d\n",
3179 irq, rc);
3180 return -ENOENT;
3181 }
3182 }
3183
3184 for (i = 0; i < hisi_hba->n_phy; i++) {
3185 struct hisi_sas_phy *phy = &hisi_hba->phy[i];
3186 int idx = i + 72; /* First SATA interrupt is irq72 */
3187
3188 irq = irq_map[idx];
3189 if (!irq) {
3190 dev_err(dev, "irq init: fail map phy interrupt %d\n",
3191 idx);
3192 return -ENOENT;
3193 }
3194
3195 rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
3196 DRV_NAME " sata", phy);
3197 if (rc) {
3198 dev_err(dev, "irq init: could not request "
3199 "sata interrupt %d, rc=%d\n",
3200 irq, rc);
3201 return -ENOENT;
3202 }
3203 }
3204
3205 for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
3206 int idx = i;
3207
3208 irq = irq_map[idx + 81];
3209 if (!irq) {
3210 dev_err(dev, "irq init: fail map fatal interrupt %d\n",
3211 idx);
3212 return -ENOENT;
3213 }
3214
3215 rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
3216 DRV_NAME " fatal", hisi_hba);
3217 if (rc) {
3218 dev_err(dev,
3219 "irq init: could not request fatal interrupt %d, rc=%d\n",
3220 irq, rc);
3221 return -ENOENT;
3222 }
3223 }
3224
3225 for (i = 0; i < hisi_hba->queue_count; i++) {
3226 int idx = i + 96; /* First cq interrupt is irq96 */
3227 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
3228 struct tasklet_struct *t = &cq->tasklet;
3229
3230 irq = irq_map[idx];
3231 if (!irq) {
3232 dev_err(dev,
3233 "irq init: could not map cq interrupt %d\n",
3234 idx);
3235 return -ENOENT;
3236 }
3237 rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
3238 DRV_NAME " cq", &hisi_hba->cq[i]);
3239 if (rc) {
3240 dev_err(dev,
3241 "irq init: could not request cq interrupt %d, rc=%d\n",
3242 irq, rc);
3243 return -ENOENT;
3244 }
3245 tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
3246 }
3247
3248 return 0;
3249 }
3250
3251 static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
3252 {
3253 int rc;
3254
3255 memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap));
3256
3257 rc = hw_init_v2_hw(hisi_hba);
3258 if (rc)
3259 return rc;
3260
3261 rc = interrupt_init_v2_hw(hisi_hba);
3262 if (rc)
3263 return rc;
3264
3265 return 0;
3266 }
3267
3268 static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
3269 {
3270 struct platform_device *pdev = hisi_hba->pdev;
3271 int i;
3272
3273 for (i = 0; i < hisi_hba->queue_count; i++)
3274 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
3275
3276 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
3277 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
3278 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
3279 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
3280
3281 for (i = 0; i < hisi_hba->n_phy; i++) {
3282 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
3283 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
3284 }
3285
3286 for (i = 0; i < 128; i++)
3287 synchronize_irq(platform_get_irq(pdev, i));
3288 }
3289
/*
 * Soft-reset the controller: quiesce interrupts, queues and phys, wait
 * for the AXI bus to go idle, re-run hardware init, re-enable the phys
 * that were enabled before, then rescan for topology changes against
 * the phy state saved before the reset.
 *
 * Return: 0 on success, -1 if the AXI bus never went idle, or the
 * error from hw_init_v2_hw().
 */
static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	u32 old_state, state;
	int rc, cnt;
	int phy_no;

	/* Snapshot phy state so we can diff the topology afterwards. */
	old_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	interrupt_disable_v2_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

	stop_phys_v2_hw(hisi_hba);

	mdelay(10);

	/* Request AXI master to stop accepting new transactions. */
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);

	/* wait until bus idle */
	cnt = 0;
	while (1) {
		u32 status = hisi_sas_read32_relaxed(hisi_hba,
				AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN);

		/* 0x3 indicates all outstanding transactions returned. */
		if (status == 0x3)
			break;

		udelay(10);
		if (cnt++ > 10) {
			dev_info(dev, "wait axi bus state to idle timeout!\n");
			return -1;
		}
	}

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	/* Part of the SoC STP processing bug workaround. */
	phys_reject_stp_links_v2_hw(hisi_hba);

	/* Re-enable the PHYs */
	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (sas_phy->enabled)
			start_phy_v2_hw(hisi_hba, phy_no);
	}

	/* Wait for the PHYs to come up and read the PHY state */
	msleep(1000);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);

	hisi_sas_rescan_topology(hisi_hba, old_state, state);

	return 0;
}
3350
/* v2 hardware operations table plugged into the common hisi_sas core. */
static const struct hisi_sas_hw hisi_sas_v2_hw = {
	.hw_init = hisi_sas_v2_init,
	.setup_itct = setup_itct_v2_hw,
	.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
	.alloc_dev = alloc_dev_quirk_v2_hw,
	.sl_notify = sl_notify_v2_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
	.free_device = free_device_v2_hw,
	.prep_smp = prep_smp_v2_hw,
	.prep_ssp = prep_ssp_v2_hw,
	.prep_stp = prep_ata_v2_hw,
	.prep_abort = prep_abort_v2_hw,
	.get_free_slot = get_free_slot_v2_hw,
	.start_delivery = start_delivery_v2_hw,
	.slot_complete = slot_complete_v2_hw,
	.phys_init = phys_init_v2_hw,
	.phy_enable = enable_phy_v2_hw,
	.phy_disable = disable_phy_v2_hw,
	.phy_hard_reset = phy_hard_reset_v2_hw,
	.phy_set_linkrate = phy_set_linkrate_v2_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
	.soft_reset = soft_reset_v2_hw,
};
3376
3377 static int hisi_sas_v2_probe(struct platform_device *pdev)
3378 {
3379 /*
3380 * Check if we should defer the probe before we probe the
3381 * upper layer, as it's hard to defer later on.
3382 */
3383 int ret = platform_get_irq(pdev, 0);
3384
3385 if (ret < 0) {
3386 if (ret != -EPROBE_DEFER)
3387 dev_err(&pdev->dev, "cannot obtain irq\n");
3388 return ret;
3389 }
3390
3391 return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
3392 }
3393
3394 static int hisi_sas_v2_remove(struct platform_device *pdev)
3395 {
3396 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
3397 struct hisi_hba *hisi_hba = sha->lldd_ha;
3398
3399 if (timer_pending(&hisi_hba->timer))
3400 del_timer(&hisi_hba->timer);
3401
3402 return hisi_sas_remove(pdev);
3403 }
3404
/* Device-tree compatibles handled by this driver (hip06 and hip07). */
static const struct of_device_id sas_v2_of_match[] = {
	{ .compatible = "hisilicon,hip06-sas-v2",},
	{ .compatible = "hisilicon,hip07-sas-v2",},
	{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);
3411
/* ACPI HID handled by this driver. */
static const struct acpi_device_id sas_v2_acpi_match[] = {
	{ "HISI0162", 0 },
	{ }
};

MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match);
3418
/* Platform driver glue; matches via either DT or ACPI tables above. */
static struct platform_driver hisi_sas_v2_driver = {
	.probe = hisi_sas_v2_probe,
	.remove = hisi_sas_v2_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sas_v2_of_match,
		.acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
	},
};
3428
/* Standard module registration and metadata. */
module_platform_driver(hisi_sas_v2_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
MODULE_ALIAS("platform:" DRV_NAME);