1 | /* | |
2 | * Copyright (c) 2017 Hisilicon Limited. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | */ | |
10 | ||
11 | #include "hisi_sas.h" | |
12 | #define DRV_NAME "hisi_sas_v3_hw" | |
13 | ||
/* Global (controller-wide) registers that need init */
#define DLVRY_QUEUE_ENABLE		0x0
#define IOST_BASE_ADDR_LO		0x8
#define IOST_BASE_ADDR_HI		0xc
#define ITCT_BASE_ADDR_LO		0x10
#define ITCT_BASE_ADDR_HI		0x14
#define IO_BROKEN_MSG_ADDR_LO		0x18
#define IO_BROKEN_MSG_ADDR_HI		0x1c
#define PHY_CONTEXT			0x20
#define PHY_STATE			0x24
#define PHY_PORT_NUM_MA			0x28
#define PHY_CONN_RATE			0x30
#define ITCT_CLR			0x44
#define ITCT_CLR_EN_OFF			16
#define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF			0
#define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define CFG_MAX_TAG			0x68
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME		0x90
#define DEVICE_MSG_WORK_MODE		0x94
#define OPENA_WT_CONTI_TIME		0x9c
#define I_T_NEXUS_LOSS_TIME		0xa0
#define MAX_CON_TIME_LIMIT_TIME		0xa4
#define BUS_INACTIVE_LIMIT_TIME		0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CFG_AGING_TIME			0xbc
#define HGC_DFX_CFG2			0xc0
#define CFG_ABT_SET_QUERY_IPTT	0xd4
#define CFG_SET_ABORTED_IPTT_OFF	0
#define CFG_SET_ABORTED_IPTT_MSK	(0xfff << CFG_SET_ABORTED_IPTT_OFF)
#define CFG_SET_ABORTED_EN_OFF	12
#define CFG_ABT_SET_IPTT_DONE	0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF	0
#define HGC_IOMB_PROC1_STATUS	0x104
#define CFG_1US_TIMER_TRSH		0xcc
#define CHNL_INT_STATUS			0x148
#define INT_COAL_EN			0x19c
#define OQ_INT_COAL_TIME		0x1a0
#define OQ_INT_COAL_CNT			0x1a4
#define ENT_INT_COAL_TIME		0x1a8
#define ENT_INT_COAL_CNT		0x1ac
#define OQ_INT_SRC			0x1b0
#define OQ_INT_SRC_MSK			0x1b4
#define ENT_INT_SRC1			0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2			0x1bc
#define ENT_INT_SRC3			0x1c0
#define ENT_INT_SRC3_WP_DEPTH_OFF		8
#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
#define ENT_INT_SRC3_RP_DEPTH_OFF		10
#define ENT_INT_SRC3_AXI_OFF			11
#define ENT_INT_SRC3_FIFO_OFF			12
#define ENT_INT_SRC3_LM_OFF				14
#define ENT_INT_SRC3_ITC_INT_OFF	15
#define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC3_ABT_OFF		16
#define ENT_INT_SRC_MSK1		0x1c4
#define ENT_INT_SRC_MSK2		0x1c8
#define ENT_INT_SRC_MSK3		0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
#define CHNL_PHYUPDOWN_INT_MSK		0x1d0
#define CHNL_ENT_INT_MSK			0x1d4
#define HGC_COM_INT_MSK				0x1d8
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR			0x1e8
#define SAS_ECC_INTR_MSK		0x1ec
#define HGC_ERR_STAT_EN			0x238
/* Delivery queue registers: queue N is at base + N * 0x14 (see init_reg) */
#define DLVRY_Q_0_BASE_ADDR_LO		0x260
#define DLVRY_Q_0_BASE_ADDR_HI		0x264
#define DLVRY_Q_0_DEPTH			0x268
#define DLVRY_Q_0_WR_PTR		0x26c
#define DLVRY_Q_0_RD_PTR		0x270
#define HYPER_STREAM_ID_EN_CFG		0xc80
#define OQ0_INT_SRC_MSK			0xc90
/* Completion queue registers: queue N is at base + N * 0x14 (see init_reg) */
#define COMPL_Q_0_BASE_ADDR_LO		0x4e0
#define COMPL_Q_0_BASE_ADDR_HI		0x4e4
#define COMPL_Q_0_DEPTH			0x4e8
#define COMPL_Q_0_WR_PTR		0x4ec
#define COMPL_Q_0_RD_PTR		0x4f0
#define AWQOS_AWCACHE_CFG	0xc84
#define ARQOS_ARCACHE_CFG	0xc88
104 | ||
/*
 * Per-phy registers requiring init.
 * The phy register file for phy N lives at PORT_BASE + N * 0x400
 * (see hisi_sas_phy_read32/hisi_sas_phy_write32).
 */
#define PORT_BASE			(0x2000)
#define PHY_CFG				(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE		(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF			0
#define PHY_CFG_ENA_MSK			(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF		2
#define PHY_CFG_DC_OPT_MSK		(0x1 << PHY_CFG_DC_OPT_OFF)
#define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
#define PHY_CTRL			(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF		0
#define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
#define SL_CFG				(PORT_BASE + 0x84)
#define SL_CONTROL			(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF		17
#define SL_CTA_MSK		(0x1 << SL_CTA_OFF)
/* TX_ID_DWORD0..6: outgoing IDENTIFY address frame, consecutive at stride 4 */
#define TX_ID_DWORD0			(PORT_BASE + 0x9c)
#define TX_ID_DWORD1			(PORT_BASE + 0xa0)
#define TX_ID_DWORD2			(PORT_BASE + 0xa4)
#define TX_ID_DWORD3			(PORT_BASE + 0xa8)
#define TX_ID_DWORD4			(PORT_BASE + 0xaC)
#define TX_ID_DWORD5			(PORT_BASE + 0xb0)
#define TX_ID_DWORD6			(PORT_BASE + 0xb4)
#define TXID_AUTO				(PORT_BASE + 0xb8)
#define CT3_OFF		1
#define CT3_MSK		(0x1 << CT3_OFF)
#define TX_HARDRST_OFF          2
#define TX_HARDRST_MSK          (0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
#define STP_LINK_TIMER			(PORT_BASE + 0x120)
#define CON_CFG_DRIVER			(PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG		(PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG		(PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG		(PORT_BASE + 0x13c)
#define CHL_INT0			(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF		4
#define CHL_INT0_NOT_RDY_MSK		(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF		5
#define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1			(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF	17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
#define CHL_INT2			(PORT_BASE + 0x1bc)
#define CHL_INT0_MSK			(PORT_BASE + 0x1c0)
#define CHL_INT1_MSK			(PORT_BASE + 0x1c4)
#define CHL_INT2_MSK			(PORT_BASE + 0x1c8)
#define CHL_INT_COAL_EN			(PORT_BASE + 0x1d0)
#define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK		(PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK		(PORT_BASE + 0x2b8)
#define PHYCTRL_PHY_ENA_MSK		(PORT_BASE + 0x2bc)
#define SL_RX_BCAST_CHK_MSK		(PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK		(PORT_BASE + 0x2c4)
#define DMA_TX_STATUS			(PORT_BASE + 0x2d0)
#define DMA_TX_STATUS_BUSY_OFF		0
#define DMA_TX_STATUS_BUSY_MSK		(0x1 << DMA_TX_STATUS_BUSY_OFF)
#define DMA_RX_STATUS			(PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF		0
#define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)
/* Link-layer error counters (presumably read-only stats; not written here) */
#define ERR_CNT_DWS_LOST		(PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB		(PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW		(PORT_BASE + 0x390)
#define ERR_CNT_DISP_ERR		(PORT_BASE + 0x398)
179 | ||
/*
 * The hardware provides 2048 ITCT (device context) entries out of reset;
 * this value is never reprogrammed, so the driver's device limit must fit.
 */
#define DEFAULT_ITCT_HW		2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
#endif

/* AXI master configuration space */
#define AXI_MASTER_CFG_BASE		(0x5000)
#define AM_CTRL_GLOBAL			(0x0)
#define AM_CURR_TRANS_RETURN	(0x150)

#define AM_CFG_MAX_TRANS		(0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
#define AXI_CFG					(0x5100)
#define AM_ROB_ECC_ERR_ADDR		(0x510c)
#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF	0
#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF	8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
197 | ||
/* HW dma structures */
/* Delivery queue header (command header) bit-field layout, per dword */
/* dw0 */
#define CMD_HDR_ABORT_FLAG_OFF		0
#define CMD_HDR_ABORT_FLAG_MSK		(0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF		5
#define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF		6
#define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF		18
#define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF		27
#define CMD_HDR_PRIORITY_MSK		(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF			29
#define CMD_HDR_CMD_MSK			(0x7 << CMD_HDR_CMD_OFF)
/* dw1 */
#define CMD_HDR_UNCON_CMD_OFF	3
#define CMD_HDR_DIR_OFF			5
#define CMD_HDR_DIR_MSK			(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF		7
#define CMD_HDR_RESET_MSK		(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF		10
#define CMD_HDR_VDTL_MSK		(0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF		11
#define CMD_HDR_FRAME_TYPE_MSK		(0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF		16
#define CMD_HDR_DEV_ID_MSK		(0xffff << CMD_HDR_DEV_ID_OFF)
/* dw2 */
#define CMD_HDR_CFL_OFF			0
#define CMD_HDR_CFL_MSK			(0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF		10
#define CMD_HDR_NCQ_TAG_MSK		(0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF		15
#define CMD_HDR_MRFL_MSK		(0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF		24
#define CMD_HDR_SG_MOD_MSK		(0x3 << CMD_HDR_SG_MOD_OFF)
/* dw3 */
#define CMD_HDR_IPTT_OFF		0
#define CMD_HDR_IPTT_MSK		(0xffff << CMD_HDR_IPTT_OFF)
/* dw6 */
#define CMD_HDR_DIF_SGL_LEN_OFF		0
#define CMD_HDR_DIF_SGL_LEN_MSK		(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF	16
#define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
/* dw7 */
#define CMD_HDR_ADDR_MODE_SEL_OFF		15
#define CMD_HDR_ADDR_MODE_SEL_MSK		(1 << CMD_HDR_ADDR_MODE_SEL_OFF)
#define CMD_HDR_ABORT_IPTT_OFF		16
#define CMD_HDR_ABORT_IPTT_MSK		(0xffff << CMD_HDR_ABORT_IPTT_OFF)
249 | ||
/* Completion header bit-field layout, per dword */
/* dw0 */
#define CMPLT_HDR_CMPLT_OFF		0
#define CMPLT_HDR_CMPLT_MSK		(0x3 << CMPLT_HDR_CMPLT_OFF)
#define CMPLT_HDR_ERROR_PHASE_OFF   2
#define CMPLT_HDR_ERROR_PHASE_MSK   (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF	10
#define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF		12
#define CMPLT_HDR_ERX_MSK		(0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF	13
#define CMPLT_HDR_ABORT_STAT_MSK	(0x7 << CMPLT_HDR_ABORT_STAT_OFF)
/* abort_stat: values reported in CMPLT_HDR_ABORT_STAT */
#define STAT_IO_NOT_VALID		0x1
#define STAT_IO_NO_DEVICE		0x2
#define STAT_IO_COMPLETE		0x3
#define STAT_IO_ABORTED			0x4
/* dw1 */
#define CMPLT_HDR_IPTT_OFF		0
#define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF		16
#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)
/* dw3 */
#define CMPLT_HDR_IO_IN_TARGET_OFF	17
#define CMPLT_HDR_IO_IN_TARGET_MSK	(0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
275 | ||
/* ITCT (device context) header bit-field layout, per qword */
/* qw0 */
#define ITCT_HDR_DEV_TYPE_OFF		0
#define ITCT_HDR_DEV_TYPE_MSK		(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF		2
#define ITCT_HDR_VALID_MSK		(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF		5
#define ITCT_HDR_MCR_MSK		(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF		9
#define ITCT_HDR_VLN_MSK		(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_SMP_TIMEOUT_OFF	16
#define ITCT_HDR_AWT_CONTINUE_OFF	25
#define ITCT_HDR_PORT_ID_OFF		28
#define ITCT_HDR_PORT_ID_MSK		(0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
#define ITCT_HDR_INLT_OFF		0
#define ITCT_HDR_INLT_MSK		(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_RTOLT_OFF		48
#define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)
295 | ||
/*
 * One completion queue entry as written by the v3 hardware (little endian);
 * dword meanings are decoded with the CMPLT_HDR_* masks above.
 */
struct hisi_sas_complete_v3_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};
302 | ||
/* Per-command error record written by the v3 hardware (little endian). */
struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};
317 | ||
#define RX_DATA_LEN_UNDERFLOW_OFF	6
#define RX_DATA_LEN_UNDERFLOW_MSK	(1 << RX_DATA_LEN_UNDERFLOW_OFF)

/* Command table size and MSI vector count for this hw revision */
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32

/* Indices of the phy-related interrupt vectors */
enum {
	HISI_SAS_PHY_PHY_UPDOWN,
	HISI_SAS_PHY_CHNL_INT,
	HISI_SAS_PHY_INT_NR
};

/* Data direction values for the command header DIR field (dw1) */
#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
334 | ||
/*
 * ATA commands that must be issued as "unconstrained" (sets the
 * CMD_HDR_UNCON_CMD bit in the command header).  The argument is fully
 * parenthesized so the macro expands safely even when passed a compound
 * expression.
 */
#define CMD_IS_UNCONSTRAINT(cmd) \
	(((cmd) == ATA_CMD_READ_LOG_EXT) || \
	((cmd) == ATA_CMD_READ_LOG_DMA_EXT) || \
	((cmd) == ATA_CMD_DEV_RESET))
339 | ||
340 | static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) | |
341 | { | |
342 | void __iomem *regs = hisi_hba->regs + off; | |
343 | ||
344 | return readl(regs); | |
345 | } | |
346 | ||
347 | static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) | |
348 | { | |
349 | void __iomem *regs = hisi_hba->regs + off; | |
350 | ||
351 | return readl_relaxed(regs); | |
352 | } | |
353 | ||
354 | static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) | |
355 | { | |
356 | void __iomem *regs = hisi_hba->regs + off; | |
357 | ||
358 | writel(val, regs); | |
359 | } | |
360 | ||
361 | static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, | |
362 | u32 off, u32 val) | |
363 | { | |
364 | void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; | |
365 | ||
366 | writel(val, regs); | |
367 | } | |
368 | ||
369 | static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, | |
370 | int phy_no, u32 off) | |
371 | { | |
372 | void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; | |
373 | ||
374 | return readl(regs); | |
375 | } | |
376 | ||
/*
 * Program every register the v3 controller needs after reset: global
 * configuration, interrupt sources/masks, per-phy settings, delivery and
 * completion queue bases, and the DMA addresses of the ITCT, IOST,
 * breakpoint and initial-FIS memories.  The magic values are the
 * vendor-recommended settings for this hw revision; do not "clean them up".
 */
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	/* Global registers init */
	/* Enable one delivery queue per configured queue (bitmask) */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	/* Writing all-ones clears any stale interrupt source bits (W1C) */
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	/* Unmask completion interrupts for every active queue */
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x30000);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
		/* Clear (W1C) then selectively mask the per-channel ints */
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
				     0xa03e8);
		hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
				     0xa03e8);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
				     0x7f7a120);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER,
				     0x2a0a80);
	}
	/* Queue register banks repeat at stride 0x14 per queue */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}
491 | ||
492 | static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
493 | { | |
494 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); | |
495 | ||
496 | cfg &= ~PHY_CFG_DC_OPT_MSK; | |
497 | cfg |= 1 << PHY_CFG_DC_OPT_OFF; | |
498 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); | |
499 | } | |
500 | ||
501 | static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
502 | { | |
503 | struct sas_identify_frame identify_frame; | |
504 | u32 *identify_buffer; | |
505 | ||
506 | memset(&identify_frame, 0, sizeof(identify_frame)); | |
507 | identify_frame.dev_type = SAS_END_DEVICE; | |
508 | identify_frame.frame_type = 0; | |
509 | identify_frame._un1 = 1; | |
510 | identify_frame.initiator_bits = SAS_PROTOCOL_ALL; | |
511 | identify_frame.target_bits = SAS_PROTOCOL_NONE; | |
512 | memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); | |
513 | memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); | |
514 | identify_frame.phy_id = phy_no; | |
515 | identify_buffer = (u32 *)(&identify_frame); | |
516 | ||
517 | hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, | |
518 | __swab32(identify_buffer[0])); | |
519 | hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, | |
520 | __swab32(identify_buffer[1])); | |
521 | hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, | |
522 | __swab32(identify_buffer[2])); | |
523 | hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, | |
524 | __swab32(identify_buffer[3])); | |
525 | hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, | |
526 | __swab32(identify_buffer[4])); | |
527 | hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, | |
528 | __swab32(identify_buffer[5])); | |
529 | } | |
530 | ||
/*
 * Populate the ITCT (device context) entry for @sas_dev so the controller
 * can address the device: device type, link rate, port id, SAS address and
 * (for non-SATA devices) the inactivity / reject-to-open timeouts.
 */
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		/* SATA behind an expander is driven over STP, direct is SATA */
		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		/* Entry stays type 0; only warn (no error propagation here) */
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1: SAS address, byte-swapped to the order the hw expects */
	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = __swab64(itct->sas_addr);

	/* qw2: I_T nexus loss and reject-to-open timeouts (SAS only) */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
581 | ||
/*
 * Invalidate the hardware ITCT entry for @sas_dev: clear any pending
 * ITC interrupt, trigger the ITCT_CLR operation for the device id, and
 * after a short delay check for the "clear done" interrupt before wiping
 * the in-memory entry.
 *
 * NOTE(review): the done-interrupt is checked exactly once after a fixed
 * udelay(10) rather than polled; if it has not fired yet the entry is
 * left untouched.
 */
static void free_device_v3_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_device *sas_dev)
{
	u64 dev_id = sas_dev->device_id;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);

	/* clear the itct interrupt state (write-1-to-clear) */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table: enable bit + target device id */
	reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
	reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	udelay(10);
	reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
		dev_dbg(dev, "got clear ITCT done interrupt\n");

		/* invalidate the in-memory itct entry */
		memset(itct, 0, sizeof(struct hisi_sas_itct));
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

		/* de-assert the clear request */
		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
	}
}
615 | ||
/*
 * De-register @device from the controller: mark every slot still queued
 * for it as aborted (one CFG_ABT_SET_QUERY_IPTT write per outstanding
 * IPTT), then drop the abort-set enable bit and latch the operation with
 * a write to CFG_ABT_SET_IPTT_DONE.
 */
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
		CFG_ABT_SET_QUERY_IPTT);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		/* Replace the IPTT field, keep enable bit set */
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			cfg_abt_set_query_iptt);
	}
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
		cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
		1 << CFG_ABT_SET_IPTT_DONE_OFF);
}
638 | ||
/*
 * Quiesce and reset the controller: disable all delivery queues and phys,
 * wait for the AXI master to go idle, then invoke the platform reset
 * method (ACPI _RST).
 *
 * Returns 0 on success, -EIO if the AXI bus never idles or _RST fails.
 * With no ACPI handle the failure is only logged and 0 is returned.
 */
static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int ret;
	u32 val;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	hisi_sas_stop_phys(hisi_hba);
	udelay(50);

	/* Ensure axi bus idle: poll AXI_CFG until zero, 20ms step, 1s max */
	ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
				 20000, 1000000);
	if (ret) {
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
		return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else
		dev_err(dev, "no reset method!\n");

	return 0;
}
672 | ||
673 | static int hw_init_v3_hw(struct hisi_hba *hisi_hba) | |
674 | { | |
675 | struct device *dev = hisi_hba->dev; | |
676 | int rc; | |
677 | ||
678 | rc = reset_hw_v3_hw(hisi_hba); | |
679 | if (rc) { | |
680 | dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); | |
681 | return rc; | |
682 | } | |
683 | ||
684 | msleep(100); | |
685 | init_reg_v3_hw(hisi_hba); | |
686 | ||
687 | return 0; | |
688 | } | |
689 | ||
690 | static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
691 | { | |
692 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); | |
693 | ||
694 | cfg |= PHY_CFG_ENA_MSK; | |
695 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); | |
696 | } | |
697 | ||
698 | static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
699 | { | |
700 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); | |
701 | ||
702 | cfg &= ~PHY_CFG_ENA_MSK; | |
703 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); | |
704 | } | |
705 | ||
/*
 * Bring up phy @phy_no: load its IDENTIFY frame, configure the optional
 * mode bit, then enable the phy.
 */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
712 | ||
713 | static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
714 | { | |
715 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; | |
716 | u32 txid_auto; | |
717 | ||
718 | disable_phy_v3_hw(hisi_hba, phy_no); | |
719 | if (phy->identify.device_type == SAS_END_DEVICE) { | |
720 | txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); | |
721 | hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, | |
722 | txid_auto | TX_HARDRST_MSK); | |
723 | } | |
724 | msleep(100); | |
725 | start_phy_v3_hw(hisi_hba, phy_no); | |
726 | } | |
727 | ||
728 | enum sas_linkrate phy_get_max_linkrate_v3_hw(void) | |
729 | { | |
730 | return SAS_LINK_RATE_12_0_GBPS; | |
731 | } | |
732 | ||
733 | static void phys_init_v3_hw(struct hisi_hba *hisi_hba) | |
734 | { | |
735 | int i; | |
736 | ||
737 | for (i = 0; i < hisi_hba->n_phy; i++) { | |
738 | struct hisi_sas_phy *phy = &hisi_hba->phy[i]; | |
739 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | |
740 | ||
741 | if (!sas_phy->phy->enabled) | |
742 | continue; | |
743 | ||
744 | start_phy_v3_hw(hisi_hba, i); | |
745 | } | |
746 | } | |
747 | ||
748 | static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
749 | { | |
750 | u32 sl_control; | |
751 | ||
752 | sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); | |
753 | sl_control |= SL_CONTROL_NOTIFY_EN_MSK; | |
754 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); | |
755 | msleep(1); | |
756 | sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); | |
757 | sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; | |
758 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); | |
759 | } | |
760 | ||
761 | static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) | |
762 | { | |
763 | int i, bitmap = 0; | |
764 | u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); | |
765 | u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); | |
766 | ||
767 | for (i = 0; i < hisi_hba->n_phy; i++) | |
768 | if (phy_state & BIT(i)) | |
769 | if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) | |
770 | bitmap |= BIT(i); | |
771 | ||
772 | return bitmap; | |
773 | } | |
774 | ||
775 | /** | |
776 | * The callpath to this function and upto writing the write | |
777 | * queue pointer should be safe from interruption. | |
778 | */ | |
779 | static int | |
780 | get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) | |
781 | { | |
782 | struct device *dev = hisi_hba->dev; | |
783 | int queue = dq->id; | |
784 | u32 r, w; | |
785 | ||
786 | w = dq->wr_point; | |
787 | r = hisi_sas_read32_relaxed(hisi_hba, | |
788 | DLVRY_Q_0_RD_PTR + (queue * 0x14)); | |
789 | if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { | |
790 | dev_warn(dev, "full queue=%d r=%d w=%d\n\n", | |
791 | queue, r, w); | |
792 | return -EAGAIN; | |
793 | } | |
794 | ||
795 | return 0; | |
796 | } | |
797 | ||
798 | static void start_delivery_v3_hw(struct hisi_sas_dq *dq) | |
799 | { | |
800 | struct hisi_hba *hisi_hba = dq->hisi_hba; | |
801 | int dlvry_queue = dq->slot_prep->dlvry_queue; | |
802 | int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; | |
803 | ||
804 | dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; | |
805 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), | |
806 | dq->wr_point); | |
807 | } | |
808 | ||
809 | static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, | |
810 | struct hisi_sas_slot *slot, | |
811 | struct hisi_sas_cmd_hdr *hdr, | |
812 | struct scatterlist *scatter, | |
813 | int n_elem) | |
814 | { | |
815 | struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); | |
816 | struct device *dev = hisi_hba->dev; | |
817 | struct scatterlist *sg; | |
818 | int i; | |
819 | ||
820 | if (n_elem > HISI_SAS_SGE_PAGE_CNT) { | |
821 | dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", | |
822 | n_elem); | |
823 | return -EINVAL; | |
824 | } | |
825 | ||
826 | for_each_sg(scatter, sg, n_elem, i) { | |
827 | struct hisi_sas_sge *entry = &sge_page->sge[i]; | |
828 | ||
829 | entry->addr = cpu_to_le64(sg_dma_address(sg)); | |
830 | entry->page_ctrl_0 = entry->page_ctrl_1 = 0; | |
831 | entry->data_len = cpu_to_le32(sg_dma_len(sg)); | |
832 | entry->data_off = 0; | |
833 | } | |
834 | ||
835 | hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); | |
836 | ||
837 | hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); | |
838 | ||
839 | return 0; | |
840 | } | |
841 | ||
/*
 * Build the command header and command frame for an SSP task.
 *
 * For a TMF the frame carries only LUN + function code (plus, for
 * ABORT/QUERY TASK, the tag of the task being managed); otherwise the
 * SCSI CDB and task attributes are copied into the command IU.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw().
 */
static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	/* dw0: response report, TLR control, port, priority, SSP cmd type */
	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		/* Task frame, no data phase */
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		/* Command frame; direction follows the SCSI command */
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2: command/response frame lengths (in dwords) and SG mode */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	/* The command IU starts just past the SSP frame header */
	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!is_tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* Tag of the task being managed, MSB first */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}
929 | ||
/*
 * Build the command header for an SMP request and DMA-map the request
 * and response buffers.
 *
 * Return: 0 on success, -ENOMEM if a buffer cannot be mapped, -EINVAL
 * if either buffer length is not a multiple of 4 bytes; mappings made
 * so far are undone on failure.
 */
static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	/* Both lengths must be dword-aligned for the hardware */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2 */
	/* Frame length is in dwords and excludes the trailing 4-byte CRC */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}
999 | ||
/*
 * Build the command header and command FIS for a SATA/STP task.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw().
 */
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, rc = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	/* dw0: port, and cmd type 3 (STP via expander) or 4 (direct SATA) */
	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* Flag a software-reset sequence (SRST bit with DEV_RESET command) */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		task->ata_task.fis.command, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	/*
	 * For NCQ the tag goes into both the FIS sector count and dw2.
	 * NOTE(review): assumes hisi_sas_get_ncq_tag() returns nonzero
	 * on success - confirm against its definition.
	 */
	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}
1080 | ||
1081 | static int prep_abort_v3_hw(struct hisi_hba *hisi_hba, | |
1082 | struct hisi_sas_slot *slot, | |
1083 | int device_id, int abort_flag, int tag_to_abort) | |
1084 | { | |
1085 | struct sas_task *task = slot->task; | |
1086 | struct domain_device *dev = task->dev; | |
1087 | struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; | |
1088 | struct hisi_sas_port *port = slot->port; | |
1089 | ||
1090 | /* dw0 */ | |
1091 | hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ | |
1092 | (port->id << CMD_HDR_PORT_OFF) | | |
1093 | ((dev_is_sata(dev) ? 1:0) | |
1094 | << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | | |
1095 | (abort_flag | |
1096 | << CMD_HDR_ABORT_FLAG_OFF)); | |
1097 | ||
1098 | /* dw1 */ | |
1099 | hdr->dw1 = cpu_to_le32(device_id | |
1100 | << CMD_HDR_DEV_ID_OFF); | |
1101 | ||
1102 | /* dw7 */ | |
1103 | hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); | |
1104 | hdr->transfer_tags = cpu_to_le32(slot->idx); | |
1105 | ||
1106 | return 0; | |
1107 | } | |
1108 | ||
/*
 * Handle a phy-up event: read the negotiated rate and port id, capture
 * the identify frame (SAS) or initial D2H FIS (SATA), and queue the
 * libsas phy-up work.
 *
 * NOTE(review): res starts at 0 (== IRQ_NONE) and is only ever set to
 * IRQ_NONE again on the error path; the caller compares the return
 * value against IRQ_HANDLED - confirm this is intended.
 */
static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res = 0;
	u32 context, port_id, link_rate, hard_phy_linkrate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	/* Mask phy-enable interrupts while this event is handled */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	/* 4 bits of port id / link rate per phy in these registers */
	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;
		sas_phy->oob_mode = SATA_OOB_MODE;
		/* SATA devices have no SAS address: fabricate one per phy */
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		/* Byte-swap each IDENTIFY frame dword read from hardware */
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	/* Ack the interrupt and unmask phy-enable again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}
1195 | ||
1196 | static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) | |
1197 | { | |
1198 | u32 phy_state, sl_ctrl, txid_auto; | |
1199 | struct device *dev = hisi_hba->dev; | |
1200 | ||
1201 | hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); | |
1202 | ||
1203 | phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); | |
1204 | dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state); | |
1205 | hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0); | |
1206 | ||
1207 | sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); | |
1208 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, | |
1209 | sl_ctrl&(~SL_CTA_MSK)); | |
1210 | ||
1211 | txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); | |
1212 | hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, | |
1213 | txid_auto | CT3_MSK); | |
1214 | ||
1215 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); | |
1216 | hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); | |
1217 | ||
1218 | return 0; | |
1219 | } | |
1220 | ||
1221 | static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) | |
1222 | { | |
1223 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; | |
1224 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | |
1225 | struct sas_ha_struct *sas_ha = &hisi_hba->sha; | |
1226 | ||
1227 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); | |
1228 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | |
1229 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, | |
1230 | CHL_INT0_SL_RX_BCST_ACK_MSK); | |
1231 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); | |
1232 | } | |
1233 | ||
/*
 * Interrupt handler for phy up/down/broadcast events.
 *
 * CHNL_INT_STATUS holds a 4-bit nibble per phy; bit 0 of each nibble
 * (mask 0x11111111) flags the up/down/broadcast event group.  The
 * PHY_STATE ready bit decides whether to treat the event as up/bcast
 * or down.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					phy_bcast_v3_hw(phy_no, hisi_hba);
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		/* Advance to the next phy's nibble */
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}
1273 | ||
/*
 * Interrupt handler for the per-channel error interrupts.
 *
 * Bits 1-3 of each phy's nibble in CHNL_INT_STATUS (mask 0xeeeeeeee)
 * select CHL_INT0/1/2 respectively.  ENT95 is masked in
 * ENT_INT_SRC_MSK3 for the duration of the handler and restored at
 * the end.
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	/* Save the mask register, then mask ENT95 while processing */
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0xeeeeeeee;

	while (irq_msk) {
		u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT0);
		u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT1);
		u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT2);

		/* CHL_INT1: DMA ECC errors are fatal */
		if ((irq_msk & (4 << (phy_no * 4))) &&
		    irq_value1) {
			if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
					  CHL_INT1_DMAC_TX_ECC_ERR_MSK))
				panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
				      dev_name(dev), irq_value1);

			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT1, irq_value1);
		}

		/* CHL_INT2: write-1-to-clear */
		if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT2, irq_value2);


		/*
		 * CHL_INT0: clear everything except the phy up/down and
		 * broadcast bits, which belong to the phy event handler.
		 */
		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
			hisi_sas_phy_write32(hisi_hba, phy_no,
					CHL_INT0, irq_value0
					& (~CHL_INT0_SL_RX_BCST_ACK_MSK)
					& (~CHL_INT0_SL_PHY_ENABLE_MSK)
					& (~CHL_INT0_NOT_RDY_MSK));
		}
		irq_msk &= ~(0xe << (phy_no * 4));
		phy_no++;
	}

	/* Restore the saved interrupt mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}
1328 | ||
/*
 * Translate the hardware error record of a failed slot into libsas
 * task status.
 *
 * The status buffer's error record provides the DMA RX error type
 * and, for underruns, the residual byte count (carried in
 * trans_tx_fail_type).
 */
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
			hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = record->dma_rx_err_type;
	u32 trans_tx_fail_type = record->trans_tx_fail_type;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			/* Short read: report the residual */
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			/* IO reached the target: flag the slot for abort */
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		/* Extract the D2H FIS result for the SATA task */
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}
1378 | ||
/*
 * Complete one I/O slot: translate the completion header and status
 * buffer into libsas task status, free the slot, and call the task's
 * completion callback.
 *
 * Return: the resulting ts->stat value, -EINVAL for a torn-down task,
 * or -1 if the task had already been aborted.
 */
static int
slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	enum exec_status sts;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	int aborted;
	unsigned long flags;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	sas_dev = device->lldd_dev;

	/* Atomically sample and clear the in-flight state flags */
	spin_lock_irqsave(&task->task_state_lock, flags);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;
	/* Already aborted: just free the slot, skip task_done */
	if (unlikely(aborted)) {
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_task_free(hisi_hba, task, slot);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		return -1;
	}

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
			>> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		slot_err_v3_hw(hisi_hba, task, slot);
		/* slot->abort set: keep the slot alive for the abort path */
		if (unlikely(slot->abort))
			return ts->stat;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		/* Response IU follows the error record in the status buf */
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		/* Unmap both buffers, then copy the response to the sg page */
		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_err(dev, "slot complete: port %d has removed\n",
			slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}
1516 | ||
/*
 * Completion queue tasklet: walk the CQ from rd_point to the hardware
 * write pointer, completing each slot (expanding NCQ group entries via
 * the ITCT), then write back the new read pointer.
 */
static void cq_tasklet_v3_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_itct *itct;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point, dev_id;
	int queue = cq->id;
	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

	complete_queue = hisi_hba->complete_hdr[queue];

	/* Serialize against the matching delivery queue */
	spin_lock(&dq->lock);
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				/* ffs() is 1-based; convert to a bit index */
				ncq_tag_count -= 1;
				/* Five 12-bit tags are packed per 64-bit qword */
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v3_hw(hisi_hba, slot);

				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			/* Non-NCQ: the IPTT is directly in dw1 */
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		}

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	spin_unlock(&dq->lock);
}
1582 | ||
1583 | static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) | |
1584 | { | |
1585 | struct hisi_sas_cq *cq = p; | |
1586 | struct hisi_hba *hisi_hba = cq->hisi_hba; | |
1587 | int queue = cq->id; | |
1588 | ||
1589 | hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); | |
1590 | ||
1591 | tasklet_schedule(&cq->tasklet); | |
1592 | ||
1593 | return IRQ_HANDLED; | |
1594 | } | |
1595 | ||
1596 | static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) | |
1597 | { | |
1598 | struct device *dev = hisi_hba->dev; | |
1599 | struct pci_dev *pdev = hisi_hba->pci_dev; | |
1600 | int vectors, rc; | |
1601 | int i, k; | |
1602 | int max_msi = HISI_SAS_MSI_COUNT_V3_HW; | |
1603 | ||
1604 | vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1, | |
1605 | max_msi, PCI_IRQ_MSI); | |
1606 | if (vectors < max_msi) { | |
1607 | dev_err(dev, "could not allocate all msi (%d)\n", vectors); | |
1608 | return -ENOENT; | |
1609 | } | |
1610 | ||
1611 | rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), | |
1612 | int_phy_up_down_bcast_v3_hw, 0, | |
1613 | DRV_NAME " phy", hisi_hba); | |
1614 | if (rc) { | |
1615 | dev_err(dev, "could not request phy interrupt, rc=%d\n", rc); | |
1616 | rc = -ENOENT; | |
1617 | goto free_irq_vectors; | |
1618 | } | |
1619 | ||
1620 | rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), | |
1621 | int_chnl_int_v3_hw, 0, | |
1622 | DRV_NAME " channel", hisi_hba); | |
1623 | if (rc) { | |
1624 | dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); | |
1625 | rc = -ENOENT; | |
1626 | goto free_phy_irq; | |
1627 | } | |
1628 | ||
1629 | /* Init tasklets for cq only */ | |
1630 | for (i = 0; i < hisi_hba->queue_count; i++) { | |
1631 | struct hisi_sas_cq *cq = &hisi_hba->cq[i]; | |
1632 | struct tasklet_struct *t = &cq->tasklet; | |
1633 | ||
1634 | rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16), | |
1635 | cq_interrupt_v3_hw, 0, | |
1636 | DRV_NAME " cq", cq); | |
1637 | if (rc) { | |
1638 | dev_err(dev, | |
1639 | "could not request cq%d interrupt, rc=%d\n", | |
1640 | i, rc); | |
1641 | rc = -ENOENT; | |
1642 | goto free_cq_irqs; | |
1643 | } | |
1644 | ||
1645 | tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq); | |
1646 | } | |
1647 | ||
1648 | return 0; | |
1649 | ||
1650 | free_cq_irqs: | |
1651 | for (k = 0; k < i; k++) { | |
1652 | struct hisi_sas_cq *cq = &hisi_hba->cq[k]; | |
1653 | ||
1654 | free_irq(pci_irq_vector(pdev, k+16), cq); | |
1655 | } | |
1656 | free_irq(pci_irq_vector(pdev, 2), hisi_hba); | |
1657 | free_phy_irq: | |
1658 | free_irq(pci_irq_vector(pdev, 1), hisi_hba); | |
1659 | free_irq_vectors: | |
1660 | pci_free_irq_vectors(pdev); | |
1661 | return rc; | |
1662 | } | |
1663 | ||
/*
 * Full controller bring-up: initialise the hardware, then hook up the
 * interrupts.  Returns 0 on success or the first failing step's errno.
 */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return interrupt_init_v3_hw(hisi_hba);
}
1678 | ||
/*
 * Program the allowed link-rate range of a phy into PROG_PHY_LINK_RATE
 * and hard-reset the phy so it renegotiates.
 *
 * Exactly one of r->minimum_linkrate / r->maximum_linkrate must be
 * SAS_LINK_RATE_UNKNOWN; the missing bound is taken from the current
 * sphy settings.  If both are specified the request is silently ignored.
 *
 * NOTE(review): after the SAS_LINK_RATE_1_5_GBPS rebase below, 'min' is
 * never read again -- the rate mask always starts at 1.5G regardless of
 * the requested minimum.  Confirm against the hardware programming spec
 * whether the minimum is meant to be encoded here.
 */
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
		struct sas_phy_linkrates *r)
{
	u32 prog_phy_link_rate =
		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;
	enum sas_linkrate min, max;
	u32 rate_mask = 0;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	/* Publish the new range to the transport layer. */
	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	/* Rebase so that SAS_LINK_RATE_1_5_GBPS becomes 0. */
	min -= SAS_LINK_RATE_1_5_GBPS;
	max -= SAS_LINK_RATE_1_5_GBPS;

	/* Two register bits per rate step; enable every rate up to 'max'. */
	for (i = 0; i <= max; i++)
		rate_mask |= 1 << (i * 2);

	/* Replace only the low rate-mask byte; preserve the other bits. */
	prog_phy_link_rate &= ~0xff;
	prog_phy_link_rate |= rate_mask;

	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			prog_phy_link_rate);

	phy_hard_reset_v3_hw(hisi_hba, phy_no);
}
1716 | ||
/*
 * Quiesce and mask every controller interrupt source ahead of a soft
 * reset: let any in-flight handler finish (synchronize_irq()), then mask
 * the source at the chip so nothing fires during the reset.
 *
 * NOTE(review): vector 11 is synchronized here but no handler for it is
 * requested in interrupt_init_v3_hw() as visible in this file --
 * presumably a fatal/ECC irq requested elsewhere; confirm the vector map.
 */
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, 1));	/* phy up/down/bcast */
	synchronize_irq(pci_irq_vector(pdev, 2));	/* channel */
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Mask the per-queue outbound-queue interrupt, then drain. */
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
		synchronize_irq(pci_irq_vector(pdev, i + 16));
	}

	/* Mask the global entry interrupt sources and ECC interrupts. */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	/* Mask the per-phy channel and phy-control interrupt sources. */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}
1743 | ||
1744 | static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) | |
1745 | { | |
1746 | return hisi_sas_read32(hisi_hba, PHY_STATE); | |
1747 | } | |
1748 | ||
1749 | static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |
1750 | { | |
1751 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; | |
1752 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | |
1753 | struct sas_phy *sphy = sas_phy->phy; | |
1754 | u32 reg_value; | |
1755 | ||
1756 | /* loss dword sync */ | |
1757 | reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); | |
1758 | sphy->loss_of_dword_sync_count += reg_value; | |
1759 | ||
1760 | /* phy reset problem */ | |
1761 | reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB); | |
1762 | sphy->phy_reset_problem_count += reg_value; | |
1763 | ||
1764 | /* invalid dword */ | |
1765 | reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); | |
1766 | sphy->invalid_dword_count += reg_value; | |
1767 | ||
1768 | /* disparity err */ | |
1769 | reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); | |
1770 | sphy->running_disparity_error_count += reg_value; | |
1771 | ||
1772 | } | |
1773 | ||
/*
 * Soft-reset the controller: quiesce interrupts and phys, disable the
 * delivery queues, halt the AXI master and wait for the bus to go idle,
 * then re-initialise queue memory and re-run the full hardware init.
 *
 * Return: 0 on success; negative errno if the AXI bus never went idle
 * or hw_init_v3_hw() failed.  The statement order here is part of the
 * reset sequence -- do not reorder.
 */
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;
	u32 status;

	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

	hisi_sas_stop_phys(hisi_hba);

	/* Give in-flight traffic time to drain before halting the master. */
	mdelay(10);

	/* Request AXI master halt. */
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);

	/* wait until bus idle (poll every 10us, 100us timeout) */
	rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
		AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}
1801 | ||
/*
 * v3 hardware method table plugged into the hisi_sas core library via
 * hisi_hba->hw (set in hisi_sas_shost_alloc_pci()).
 */
static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.hw_init = hisi_sas_v3_init,
	.setup_itct = setup_itct_v3_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.free_device = free_device_v3_hw,
	.sl_notify = sl_notify_v3_hw,
	.prep_ssp = prep_ssp_v3_hw,
	.prep_smp = prep_smp_v3_hw,
	.prep_stp = prep_ata_v3_hw,
	.prep_abort = prep_abort_v3_hw,
	.get_free_slot = get_free_slot_v3_hw,
	.start_delivery = start_delivery_v3_hw,
	.slot_complete = slot_complete_v3_hw,
	.phys_init = phys_init_v3_hw,
	.phy_start = start_phy_v3_hw,
	.phy_disable = disable_phy_v3_hw,
	.phy_hard_reset = phy_hard_reset_v3_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
	.phy_set_linkrate = phy_set_linkrate_v3_hw,
	.dereg_device = dereg_device_v3_hw,
	.soft_reset = soft_reset_v3_hw,
	.get_phys_state = get_phys_state_v3_hw,
	.get_events = phy_get_events_v3_hw,
};
1828 | ||
1829 | static struct Scsi_Host * | |
1830 | hisi_sas_shost_alloc_pci(struct pci_dev *pdev) | |
1831 | { | |
1832 | struct Scsi_Host *shost; | |
1833 | struct hisi_hba *hisi_hba; | |
1834 | struct device *dev = &pdev->dev; | |
1835 | ||
1836 | shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); | |
1837 | if (!shost) { | |
1838 | dev_err(dev, "shost alloc failed\n"); | |
1839 | return NULL; | |
1840 | } | |
1841 | hisi_hba = shost_priv(shost); | |
1842 | ||
1843 | hisi_hba->hw = &hisi_sas_v3_hw; | |
1844 | hisi_hba->pci_dev = pdev; | |
1845 | hisi_hba->dev = dev; | |
1846 | hisi_hba->shost = shost; | |
1847 | SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; | |
1848 | ||
1849 | init_timer(&hisi_hba->timer); | |
1850 | ||
1851 | if (hisi_sas_get_fw_info(hisi_hba) < 0) | |
1852 | goto err_out; | |
1853 | ||
1854 | if (hisi_sas_alloc(hisi_hba, shost)) { | |
1855 | hisi_sas_free(hisi_hba); | |
1856 | goto err_out; | |
1857 | } | |
1858 | ||
1859 | return shost; | |
1860 | err_out: | |
1861 | scsi_host_put(shost); | |
1862 | dev_err(dev, "shost alloc failed\n"); | |
1863 | return NULL; | |
1864 | } | |
1865 | ||
/*
 * PCI probe: enable the device, set the DMA mask (64-bit with a 32-bit
 * fallback), allocate the Scsi_Host/hisi_hba pair, map BAR 5 registers,
 * populate the libsas sas_ha_struct, register with the SCSI midlayer and
 * libsas, initialise the hardware and scan for devices.
 *
 * Return: 0 on success, negative errno on failure; each error label
 * unwinds exactly the steps completed before it.
 *
 * NOTE(review): if hw->hw_init() fails after sas_register_ha()
 * succeeded, the unwind path does not call sas_unregister_ha() --
 * verify whether scsi_remove_host() alone is sufficient there.
 */
static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable_device;

	/* Prefer 64-bit DMA; fall back to 32-bit before giving up. */
	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
		    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
			dev_err(dev, "No usable DMA addressing method\n");
			rc = -EIO;
			goto err_out_regions;
		}
	}

	shost = hisi_sas_shost_alloc_pci(pdev);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	dev_set_drvdata(dev, sha);

	/* BAR 5 holds the controller register space (devm-managed map). */
	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register.\n");
		rc = -ENOMEM;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	/* Pointer arrays libsas uses to address our phys and ports. */
	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out:
	return rc;
}
1978 | ||
1979 | static void | |
1980 | hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) | |
1981 | { | |
1982 | int i; | |
1983 | ||
1984 | free_irq(pci_irq_vector(pdev, 1), hisi_hba); | |
1985 | free_irq(pci_irq_vector(pdev, 2), hisi_hba); | |
1986 | for (i = 0; i < hisi_hba->queue_count; i++) { | |
1987 | struct hisi_sas_cq *cq = &hisi_hba->cq[i]; | |
1988 | ||
1989 | free_irq(pci_irq_vector(pdev, i+16), cq); | |
1990 | tasklet_kill(&cq->tasklet); | |
1991 | } | |
1992 | pci_free_irq_vectors(pdev); | |
1993 | } | |
1994 | ||
/*
 * PCI remove: unregister from libsas and the SCSI midlayer first (so no
 * new I/O arrives), then tear down interrupts, release PCI resources,
 * free the driver's internal allocations and drop the final shost
 * reference.
 */
static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
}
2011 | ||
/*
 * Controller variants; used as the driver_data value in
 * sas_v3_pci_table below.
 */
enum {
	/* instances of the controller */
	hip08,
};
2016 | ||
2017 | static const struct pci_device_id sas_v3_pci_table[] = { | |
2018 | { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, | |
2019 | {} | |
2020 | }; | |
2021 | ||
/* PCI driver registration; module init/exit via module_pci_driver(). */
static struct pci_driver sas_v3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sas_v3_pci_table,
	.probe		= hisi_sas_v3_probe,
	.remove		= hisi_sas_v3_remove,
};
2028 | ||
2029 | module_pci_driver(sas_v3_pci_driver); | |
2030 | ||
2031 | MODULE_LICENSE("GPL"); | |
2032 | MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); | |
2033 | MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); | |
2034 | MODULE_ALIAS("platform:" DRV_NAME); |