]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
Merge branch 'linux-4.15' of git://github.com/skeggsb/linux into drm-fixes
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / hisi_sas / hisi_sas_v3_hw.c
1 /*
2 * Copyright (c) 2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 */
10
11 #include "hisi_sas.h"
12 #define DRV_NAME "hisi_sas_v3_hw"
13
14 /* global registers need init*/
15 #define DLVRY_QUEUE_ENABLE 0x0
16 #define IOST_BASE_ADDR_LO 0x8
17 #define IOST_BASE_ADDR_HI 0xc
18 #define ITCT_BASE_ADDR_LO 0x10
19 #define ITCT_BASE_ADDR_HI 0x14
20 #define IO_BROKEN_MSG_ADDR_LO 0x18
21 #define IO_BROKEN_MSG_ADDR_HI 0x1c
22 #define PHY_CONTEXT 0x20
23 #define PHY_STATE 0x24
24 #define PHY_PORT_NUM_MA 0x28
25 #define PHY_CONN_RATE 0x30
26 #define ITCT_CLR 0x44
27 #define ITCT_CLR_EN_OFF 16
28 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
29 #define ITCT_DEV_OFF 0
30 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
31 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
32 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
33 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60
34 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64
35 #define CFG_MAX_TAG 0x68
36 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
37 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
38 #define HGC_GET_ITV_TIME 0x90
39 #define DEVICE_MSG_WORK_MODE 0x94
40 #define OPENA_WT_CONTI_TIME 0x9c
41 #define I_T_NEXUS_LOSS_TIME 0xa0
42 #define MAX_CON_TIME_LIMIT_TIME 0xa4
43 #define BUS_INACTIVE_LIMIT_TIME 0xa8
44 #define REJECT_TO_OPEN_LIMIT_TIME 0xac
45 #define CFG_AGING_TIME 0xbc
46 #define HGC_DFX_CFG2 0xc0
47 #define CFG_ABT_SET_QUERY_IPTT 0xd4
48 #define CFG_SET_ABORTED_IPTT_OFF 0
49 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
50 #define CFG_SET_ABORTED_EN_OFF 12
51 #define CFG_ABT_SET_IPTT_DONE 0xd8
52 #define CFG_ABT_SET_IPTT_DONE_OFF 0
53 #define HGC_IOMB_PROC1_STATUS 0x104
54 #define CFG_1US_TIMER_TRSH 0xcc
55 #define CHNL_INT_STATUS 0x148
56 #define HGC_AXI_FIFO_ERR_INFO 0x154
57 #define AXI_ERR_INFO_OFF 0
58 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
59 #define FIFO_ERR_INFO_OFF 8
60 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
61 #define INT_COAL_EN 0x19c
62 #define OQ_INT_COAL_TIME 0x1a0
63 #define OQ_INT_COAL_CNT 0x1a4
64 #define ENT_INT_COAL_TIME 0x1a8
65 #define ENT_INT_COAL_CNT 0x1ac
66 #define OQ_INT_SRC 0x1b0
67 #define OQ_INT_SRC_MSK 0x1b4
68 #define ENT_INT_SRC1 0x1b8
69 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
70 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
71 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
72 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
73 #define ENT_INT_SRC2 0x1bc
74 #define ENT_INT_SRC3 0x1c0
75 #define ENT_INT_SRC3_WP_DEPTH_OFF 8
76 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
77 #define ENT_INT_SRC3_RP_DEPTH_OFF 10
78 #define ENT_INT_SRC3_AXI_OFF 11
79 #define ENT_INT_SRC3_FIFO_OFF 12
80 #define ENT_INT_SRC3_LM_OFF 14
81 #define ENT_INT_SRC3_ITC_INT_OFF 15
82 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
83 #define ENT_INT_SRC3_ABT_OFF 16
84 #define ENT_INT_SRC_MSK1 0x1c4
85 #define ENT_INT_SRC_MSK2 0x1c8
86 #define ENT_INT_SRC_MSK3 0x1cc
87 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
88 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0
89 #define CHNL_ENT_INT_MSK 0x1d4
90 #define HGC_COM_INT_MSK 0x1d8
91 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
92 #define SAS_ECC_INTR 0x1e8
93 #define SAS_ECC_INTR_MSK 0x1ec
94 #define HGC_ERR_STAT_EN 0x238
95 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
96 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
97 #define DLVRY_Q_0_DEPTH 0x268
98 #define DLVRY_Q_0_WR_PTR 0x26c
99 #define DLVRY_Q_0_RD_PTR 0x270
100 #define HYPER_STREAM_ID_EN_CFG 0xc80
101 #define OQ0_INT_SRC_MSK 0xc90
102 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0
103 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4
104 #define COMPL_Q_0_DEPTH 0x4e8
105 #define COMPL_Q_0_WR_PTR 0x4ec
106 #define COMPL_Q_0_RD_PTR 0x4f0
107 #define AWQOS_AWCACHE_CFG 0xc84
108 #define ARQOS_ARCACHE_CFG 0xc88
109
110 /* phy registers requiring init */
111 #define PORT_BASE (0x2000)
112 #define PHY_CFG (PORT_BASE + 0x0)
113 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
114 #define PHY_CFG_ENA_OFF 0
115 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
116 #define PHY_CFG_DC_OPT_OFF 2
117 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
118 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
119 #define PHY_CTRL (PORT_BASE + 0x14)
120 #define PHY_CTRL_RESET_OFF 0
121 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
122 #define SL_CFG (PORT_BASE + 0x84)
123 #define SL_CONTROL (PORT_BASE + 0x94)
124 #define SL_CONTROL_NOTIFY_EN_OFF 0
125 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
126 #define SL_CTA_OFF 17
127 #define SL_CTA_MSK (0x1 << SL_CTA_OFF)
128 #define TX_ID_DWORD0 (PORT_BASE + 0x9c)
129 #define TX_ID_DWORD1 (PORT_BASE + 0xa0)
130 #define TX_ID_DWORD2 (PORT_BASE + 0xa4)
131 #define TX_ID_DWORD3 (PORT_BASE + 0xa8)
132 #define TX_ID_DWORD4 (PORT_BASE + 0xaC)
133 #define TX_ID_DWORD5 (PORT_BASE + 0xb0)
134 #define TX_ID_DWORD6 (PORT_BASE + 0xb4)
135 #define TXID_AUTO (PORT_BASE + 0xb8)
136 #define CT3_OFF 1
137 #define CT3_MSK (0x1 << CT3_OFF)
138 #define TX_HARDRST_OFF 2
139 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
140 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
141 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
142 #define STP_LINK_TIMER (PORT_BASE + 0x120)
143 #define CON_CFG_DRIVER (PORT_BASE + 0x130)
144 #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
145 #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
146 #define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
147 #define CHL_INT0 (PORT_BASE + 0x1b4)
148 #define CHL_INT0_HOTPLUG_TOUT_OFF 0
149 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
150 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1
151 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
152 #define CHL_INT0_SL_PHY_ENABLE_OFF 2
153 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
154 #define CHL_INT0_NOT_RDY_OFF 4
155 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
156 #define CHL_INT0_PHY_RDY_OFF 5
157 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
158 #define CHL_INT1 (PORT_BASE + 0x1b8)
159 #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
160 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
161 #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
162 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
163 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
164 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
165 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
166 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
167 #define CHL_INT2 (PORT_BASE + 0x1bc)
168 #define CHL_INT0_MSK (PORT_BASE + 0x1c0)
169 #define CHL_INT1_MSK (PORT_BASE + 0x1c4)
170 #define CHL_INT2_MSK (PORT_BASE + 0x1c8)
171 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
172 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
173 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
174 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
175 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
176 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
177 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
178 #define DMA_TX_STATUS (PORT_BASE + 0x2d0)
179 #define DMA_TX_STATUS_BUSY_OFF 0
180 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
181 #define DMA_RX_STATUS (PORT_BASE + 0x2e8)
182 #define DMA_RX_STATUS_BUSY_OFF 0
183 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
184 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
185 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
186 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
187 #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
188
189 #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
190 #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
191 #error Max ITCT exceeded
192 #endif
193
194 #define AXI_MASTER_CFG_BASE (0x5000)
195 #define AM_CTRL_GLOBAL (0x0)
196 #define AM_CURR_TRANS_RETURN (0x150)
197
198 #define AM_CFG_MAX_TRANS (0x5010)
199 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
200 #define AXI_CFG (0x5100)
201 #define AM_ROB_ECC_ERR_ADDR (0x510c)
202 #define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF 0
203 #define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
204 #define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
205 #define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
206
207 /* HW dma structures */
208 /* Delivery queue header */
209 /* dw0 */
210 #define CMD_HDR_ABORT_FLAG_OFF 0
211 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
212 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
213 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
214 #define CMD_HDR_RESP_REPORT_OFF 5
215 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
216 #define CMD_HDR_TLR_CTRL_OFF 6
217 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
218 #define CMD_HDR_PORT_OFF 18
219 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
220 #define CMD_HDR_PRIORITY_OFF 27
221 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
222 #define CMD_HDR_CMD_OFF 29
223 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
224 /* dw1 */
225 #define CMD_HDR_UNCON_CMD_OFF 3
226 #define CMD_HDR_DIR_OFF 5
227 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
228 #define CMD_HDR_RESET_OFF 7
229 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
230 #define CMD_HDR_VDTL_OFF 10
231 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
232 #define CMD_HDR_FRAME_TYPE_OFF 11
233 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
234 #define CMD_HDR_DEV_ID_OFF 16
235 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
236 /* dw2 */
237 #define CMD_HDR_CFL_OFF 0
238 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
239 #define CMD_HDR_NCQ_TAG_OFF 10
240 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
241 #define CMD_HDR_MRFL_OFF 15
242 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
243 #define CMD_HDR_SG_MOD_OFF 24
244 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
245 /* dw3 */
246 #define CMD_HDR_IPTT_OFF 0
247 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
248 /* dw6 */
249 #define CMD_HDR_DIF_SGL_LEN_OFF 0
250 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
251 #define CMD_HDR_DATA_SGL_LEN_OFF 16
252 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
253 /* dw7 */
254 #define CMD_HDR_ADDR_MODE_SEL_OFF 15
255 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
256 #define CMD_HDR_ABORT_IPTT_OFF 16
257 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
258
259 /* Completion header */
260 /* dw0 */
261 #define CMPLT_HDR_CMPLT_OFF 0
262 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
263 #define CMPLT_HDR_ERROR_PHASE_OFF 2
264 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
265 #define CMPLT_HDR_RSPNS_XFRD_OFF 10
266 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
267 #define CMPLT_HDR_ERX_OFF 12
268 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
269 #define CMPLT_HDR_ABORT_STAT_OFF 13
270 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
271 /* abort_stat */
272 #define STAT_IO_NOT_VALID 0x1
273 #define STAT_IO_NO_DEVICE 0x2
274 #define STAT_IO_COMPLETE 0x3
275 #define STAT_IO_ABORTED 0x4
276 /* dw1 */
277 #define CMPLT_HDR_IPTT_OFF 0
278 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
279 #define CMPLT_HDR_DEV_ID_OFF 16
280 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
281 /* dw3 */
282 #define CMPLT_HDR_IO_IN_TARGET_OFF 17
283 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
284
285 /* ITCT header */
286 /* qw0 */
287 #define ITCT_HDR_DEV_TYPE_OFF 0
288 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
289 #define ITCT_HDR_VALID_OFF 2
290 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
291 #define ITCT_HDR_MCR_OFF 5
292 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
293 #define ITCT_HDR_VLN_OFF 9
294 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
295 #define ITCT_HDR_SMP_TIMEOUT_OFF 16
296 #define ITCT_HDR_AWT_CONTINUE_OFF 25
297 #define ITCT_HDR_PORT_ID_OFF 28
298 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
299 /* qw2 */
300 #define ITCT_HDR_INLT_OFF 0
301 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
302 #define ITCT_HDR_RTOLT_OFF 48
303 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
304
/*
 * Completion queue entry layout for v3 hw.  The bit fields inside
 * dw0/dw1/dw3 are decoded with the CMPLT_HDR_* masks defined above.
 */
struct hisi_sas_complete_v3_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;	/* NOTE(review): meaning not shown in this file — presumably an "active" bitmap; confirm against the hw spec */
	__le32 dw3;
};
311
/*
 * Per-slot error record written back by the v3 hw on I/O failure.
 * trans_rx_fail_type is tested against RX_DATA_LEN_UNDERFLOW_MSK below.
 */
struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};
326
327 #define RX_DATA_LEN_UNDERFLOW_OFF 6
328 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
329
330 #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
331 #define HISI_SAS_MSI_COUNT_V3_HW 32
332
/*
 * Indices for the per-phy interrupt groups (phy up/down events and
 * channel interrupts) plus the total count.
 * NOTE(review): presumably used to index an irq/handler table set up
 * elsewhere in this file — the interrupt init code is not in view.
 */
enum {
	HISI_SAS_PHY_PHY_UPDOWN,
	HISI_SAS_PHY_CHNL_INT,
	HISI_SAS_PHY_INT_NR
};
338
339 #define DIR_NO_DATA 0
340 #define DIR_TO_INI 1
341 #define DIR_TO_DEVICE 2
342 #define DIR_RESERVED 3
343
344 #define CMD_IS_UNCONSTRAINT(cmd) \
345 ((cmd == ATA_CMD_READ_LOG_EXT) || \
346 (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
347 (cmd == ATA_CMD_DEV_RESET))
348
349 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
350 {
351 void __iomem *regs = hisi_hba->regs + off;
352
353 return readl(regs);
354 }
355
356 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
357 {
358 void __iomem *regs = hisi_hba->regs + off;
359
360 return readl_relaxed(regs);
361 }
362
363 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
364 {
365 void __iomem *regs = hisi_hba->regs + off;
366
367 writel(val, regs);
368 }
369
370 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
371 u32 off, u32 val)
372 {
373 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
374
375 writel(val, regs);
376 }
377
378 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
379 int phy_no, u32 off)
380 {
381 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
382
383 return readl(regs);
384 }
385
/*
 * init_reg_v3_hw() - program the controller's post-reset register set.
 *
 * Called once after reset_hw_v3_hw() has reset the chip: configures
 * global interrupt sources/masks, per-phy link and timer settings, and
 * points the hardware at the DMA-allocated delivery/completion queues
 * and the ITCT/IOST/breakpoint/SATA-FIS tables.  The magic constants
 * are hardware tuning values — presumably from the v3 hw programming
 * guide (not in view); do not change or reorder them casually.
 */
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	/* Global registers init */
	/* Enable one delivery queue per configured queue (bitmask). */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	/* Write-1-to-clear any stale interrupt status. */
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	/* Interrupt masks: 0 bit = enabled. */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	/* Unmask the per-queue completion interrupts. */
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x30000);

	/* Per-phy init: clear stale channel interrupts, set masks,
	 * link rate and connection/STP timers. */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
				     0xa03e8);
		hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
				     0xa03e8);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
				     0x7f7a120);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER,
				     0x2a0a80);
	}
	/* Queue base addresses and depths: 0x14 bytes of registers per
	 * queue (BASE_LO/HI, DEPTH, WR_PTR, RD_PTR). */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}
500
501 static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
502 {
503 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
504
505 cfg &= ~PHY_CFG_DC_OPT_MSK;
506 cfg |= 1 << PHY_CFG_DC_OPT_OFF;
507 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
508 }
509
510 static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
511 {
512 struct sas_identify_frame identify_frame;
513 u32 *identify_buffer;
514
515 memset(&identify_frame, 0, sizeof(identify_frame));
516 identify_frame.dev_type = SAS_END_DEVICE;
517 identify_frame.frame_type = 0;
518 identify_frame._un1 = 1;
519 identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
520 identify_frame.target_bits = SAS_PROTOCOL_NONE;
521 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
522 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
523 identify_frame.phy_id = phy_no;
524 identify_buffer = (u32 *)(&identify_frame);
525
526 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
527 __swab32(identify_buffer[0]));
528 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
529 __swab32(identify_buffer[1]));
530 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
531 __swab32(identify_buffer[2]));
532 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
533 __swab32(identify_buffer[3]));
534 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
535 __swab32(identify_buffer[4]));
536 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
537 __swab32(identify_buffer[5]));
538 }
539
/*
 * setup_itct_v3_hw() - populate the ITCT (device context) entry for
 * @sas_dev so the hardware can address the remote device.
 *
 * Fills qw0 (device type, valid bit, link rate, port id, timeouts),
 * qw1 (byte-swapped SAS address) and, for non-SATA devices, qw2
 * (I_T nexus loss / reject-to-open limit timers).
 */
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		/* SATA behind an expander is driven as STP; direct-attached
		 * SATA uses the native SATA device type. */
		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		/* NOTE(review): falls through with qw0 still 0, yet the
		 * entry is marked valid below — confirm this is intended
		 * for unsupported device types. */
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 - hardware wants the SAS address byte-swapped */
	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = __swab64(itct->sas_addr);

	/* qw2 - nexus loss / reject-to-open timers, SAS devices only */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
590
/*
 * free_device_v3_hw() - invalidate a device's ITCT entry in hardware.
 *
 * Asks the controller to clear the ITCT entry for @sas_dev and blocks
 * until the clear is acknowledged.  The completion is signalled
 * elsewhere — presumably by the ITC interrupt handler when the
 * ENT_INT_SRC3_ITC_INT event fires (handler not in view; confirm).
 */
static void free_device_v3_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_device *sas_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	u64 dev_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);

	sas_dev->completion = &completion;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table*/
	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	/* wait for hardware to acknowledge the clear, then wipe the
	 * host-side copy of the entry */
	wait_for_completion(sas_dev->completion);
	memset(itct, 0, sizeof(struct hisi_sas_itct));
}
613
/*
 * dereg_device_v3_hw() - mark all of a device's in-flight slots as
 * aborted in hardware.
 *
 * For every slot still on the device's list, write its IPTT into
 * CFG_ABT_SET_QUERY_IPTT with the "set aborted" enable bit, then drop
 * the enable bit and latch the whole operation via
 * CFG_ABT_SET_IPTT_DONE.
 */
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
						 CFG_ABT_SET_QUERY_IPTT);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		/* replace the IPTT field, keep the other bits */
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
				 cfg_abt_set_query_iptt);
	}
	/* disable the abort-set mode again */
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			 cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
			 1 << CFG_ABT_SET_IPTT_DONE_OFF);
}
636
637 static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
638 {
639 struct device *dev = hisi_hba->dev;
640 int ret;
641 u32 val;
642
643 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
644
645 /* Disable all of the PHYs */
646 hisi_sas_stop_phys(hisi_hba);
647 udelay(50);
648
649 /* Ensure axi bus idle */
650 ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
651 20000, 1000000);
652 if (ret) {
653 dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
654 return -EIO;
655 }
656
657 if (ACPI_HANDLE(dev)) {
658 acpi_status s;
659
660 s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
661 if (ACPI_FAILURE(s)) {
662 dev_err(dev, "Reset failed\n");
663 return -EIO;
664 }
665 } else
666 dev_err(dev, "no reset method!\n");
667
668 return 0;
669 }
670
671 static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
672 {
673 struct device *dev = hisi_hba->dev;
674 int rc;
675
676 rc = reset_hw_v3_hw(hisi_hba);
677 if (rc) {
678 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
679 return rc;
680 }
681
682 msleep(100);
683 init_reg_v3_hw(hisi_hba);
684
685 return 0;
686 }
687
688 static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
689 {
690 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
691
692 cfg |= PHY_CFG_ENA_MSK;
693 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
694 }
695
696 static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
697 {
698 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
699
700 cfg &= ~PHY_CFG_ENA_MSK;
701 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
702 }
703
/*
 * start_phy_v3_hw() - bring up one phy.
 *
 * Loads the IDENTIFY frame and phy config before setting the enable
 * bit, so the phy comes up with its final settings already in place.
 * Order matters: do not enable before configuring.
 */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
710
/*
 * phy_hard_reset_v3_hw() - hard-reset the link on one phy.
 *
 * Disables the phy; for end devices additionally sets TX_HARDRST in
 * TXID_AUTO (presumably requesting a HARD RESET primitive/sequence on
 * the next bring-up — hw spec not in view), waits 100ms, then restarts
 * the phy.
 */
static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	disable_phy_v3_hw(hisi_hba, phy_no);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
				     txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	start_phy_v3_hw(hisi_hba, phy_no);
}
725
/*
 * Report the fastest link rate this controller supports (12G).
 * NOTE(review): not declared static, unlike every other function in
 * this file — confirm nothing outside this file references it;
 * otherwise it should be made static.
 */
enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}
730
731 static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
732 {
733 int i;
734
735 for (i = 0; i < hisi_hba->n_phy; i++) {
736 struct hisi_sas_phy *phy = &hisi_hba->phy[i];
737 struct asd_sas_phy *sas_phy = &phy->sas_phy;
738
739 if (!sas_phy->phy->enabled)
740 continue;
741
742 start_phy_v3_hw(hisi_hba, i);
743 }
744 }
745
746 static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
747 {
748 u32 sl_control;
749
750 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
751 sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
752 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
753 msleep(1);
754 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
755 sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
756 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
757 }
758
759 static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
760 {
761 int i, bitmap = 0;
762 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
763 u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
764
765 for (i = 0; i < hisi_hba->n_phy; i++)
766 if (phy_state & BIT(i))
767 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
768 bitmap |= BIT(i);
769
770 return bitmap;
771 }
772
773 /**
774 * The callpath to this function and upto writing the write
775 * queue pointer should be safe from interruption.
776 */
777 static int
778 get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
779 {
780 struct device *dev = hisi_hba->dev;
781 int queue = dq->id;
782 u32 r, w;
783
784 w = dq->wr_point;
785 r = hisi_sas_read32_relaxed(hisi_hba,
786 DLVRY_Q_0_RD_PTR + (queue * 0x14));
787 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
788 dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
789 queue, r, w);
790 return -EAGAIN;
791 }
792
793 return 0;
794 }
795
796 static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
797 {
798 struct hisi_hba *hisi_hba = dq->hisi_hba;
799 int dlvry_queue = dq->slot_prep->dlvry_queue;
800 int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
801
802 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
803 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
804 dq->wr_point);
805 }
806
807 static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
808 struct hisi_sas_slot *slot,
809 struct hisi_sas_cmd_hdr *hdr,
810 struct scatterlist *scatter,
811 int n_elem)
812 {
813 struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
814 struct device *dev = hisi_hba->dev;
815 struct scatterlist *sg;
816 int i;
817
818 if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
819 dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
820 n_elem);
821 return -EINVAL;
822 }
823
824 for_each_sg(scatter, sg, n_elem, i) {
825 struct hisi_sas_sge *entry = &sge_page->sge[i];
826
827 entry->addr = cpu_to_le64(sg_dma_address(sg));
828 entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
829 entry->data_len = cpu_to_le32(sg_dma_len(sg));
830 entry->data_off = 0;
831 }
832
833 hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
834
835 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
836
837 return 0;
838 }
839
/*
 * prep_ssp_v3_hw() - build the delivery-queue command header and the
 * SSP command IU for one SSP task (normal command or TMF).
 *
 * Fills hdr dw0-dw2 and the transfer tags, attaches the PRD/SGE table
 * for data-bearing commands, then writes the SSP IU (LUN, attributes,
 * CDB or TMF fields) into the slot's command table memory.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw().
 */
static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	/* TMFs are sent at high priority */
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	/* dw0: response-report, TLR control, port, priority, cmd=1 (ssp) */
	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	/* dw1: verify-data-length, frame type, data direction */
	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			/* DMA_NONE etc: no data phase */
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2: command frame length (in dwords), max response frame
	 * length, SG mode */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
		 + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	/* SSP command IU lives just past the frame header in the slot's
	 * command table memory */
	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!is_tmf) {
		/* byte 9: task attribute/priority; bytes 12..: the CDB */
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* big-endian tag of the task being managed */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}
927
/*
 * Build a SMP command header for a delivery-queue slot.
 *
 * DMA-maps the libsas-provided SMP request and response scatterlists
 * (one entry each), then fills the v3 command header: frame type/port
 * routing in dw0, ITCT device index in dw1, request/response frame
 * lengths in dw2, the IPTT transfer tag, and the DMA addresses of the
 * request frame and status buffer.
 *
 * Return: 0 on success, -ENOMEM if either mapping fails, -EINVAL if
 * either buffer length is not dword-aligned (hardware programs lengths
 * in dwords). On error, anything already mapped is unmapped via the
 * goto cleanup chain.
 */
static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	/* hardware requires dword-aligned frame lengths */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2: lengths are programmed in dwords; CRC dword excluded from req */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}
997
/*
 * Build a SATA/STP command header for a delivery-queue slot.
 *
 * Selects the command type by topology (STP behind an expander vs
 * direct-attached SATA), encodes data direction, SRST reset, ATA
 * protocol and NCQ tag into the header, builds the PRD SGE table for
 * data commands, and copies the host-to-device command FIS into the
 * slot's command table.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw().
 */
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, rc = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	/* cmd 3 = STP (behind expander), cmd 4 = direct-attached SATA */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* flag a soft reset (SRST asserted in a device-reset FIS) */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
			(task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		task->ata_task.fis.command, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2: NCQ tag is mirrored into the FIS sector_count field */
	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}
1078
/*
 * Build an internal abort command header for a delivery-queue slot.
 *
 * @device_id:	  ITCT index of the target device
 * @abort_flag:	  abort flag programmed into dw0 (presumably selects
 *		  single-command vs whole-device abort — confirm against
 *		  the hw spec / hisi_sas core definitions)
 * @tag_to_abort: IPTT of the command to be aborted (written to dw7)
 *
 * Always returns 0.
 */
static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
				   ((dev_is_sata(dev) ? 1:0)
					<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
					(abort_flag
					 << CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id
			<< CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	return 0;
}
1106
1107 static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1108 {
1109 int i, res = 0;
1110 u32 context, port_id, link_rate, hard_phy_linkrate;
1111 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1112 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1113 struct device *dev = hisi_hba->dev;
1114
1115 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
1116
1117 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
1118 port_id = (port_id >> (4 * phy_no)) & 0xf;
1119 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
1120 link_rate = (link_rate >> (phy_no * 4)) & 0xf;
1121
1122 if (port_id == 0xf) {
1123 dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
1124 res = IRQ_NONE;
1125 goto end;
1126 }
1127 sas_phy->linkrate = link_rate;
1128 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
1129 HARD_PHY_LINKRATE);
1130 phy->maximum_linkrate = hard_phy_linkrate & 0xf;
1131 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
1132 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1133
1134 /* Check for SATA dev */
1135 context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
1136 if (context & (1 << phy_no)) {
1137 struct hisi_sas_initial_fis *initial_fis;
1138 struct dev_to_host_fis *fis;
1139 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
1140
1141 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
1142 initial_fis = &hisi_hba->initial_fis[phy_no];
1143 fis = &initial_fis->fis;
1144 sas_phy->oob_mode = SATA_OOB_MODE;
1145 attached_sas_addr[0] = 0x50;
1146 attached_sas_addr[7] = phy_no;
1147 memcpy(sas_phy->attached_sas_addr,
1148 attached_sas_addr,
1149 SAS_ADDR_SIZE);
1150 memcpy(sas_phy->frame_rcvd, fis,
1151 sizeof(struct dev_to_host_fis));
1152 phy->phy_type |= PORT_TYPE_SATA;
1153 phy->identify.device_type = SAS_SATA_DEV;
1154 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
1155 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
1156 } else {
1157 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
1158 struct sas_identify_frame *id =
1159 (struct sas_identify_frame *)frame_rcvd;
1160
1161 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
1162 for (i = 0; i < 6; i++) {
1163 u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
1164 RX_IDAF_DWORD0 + (i * 4));
1165 frame_rcvd[i] = __swab32(idaf);
1166 }
1167 sas_phy->oob_mode = SAS_OOB_MODE;
1168 memcpy(sas_phy->attached_sas_addr,
1169 &id->sas_addr,
1170 SAS_ADDR_SIZE);
1171 phy->phy_type |= PORT_TYPE_SAS;
1172 phy->identify.device_type = id->dev_type;
1173 phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
1174 if (phy->identify.device_type == SAS_END_DEVICE)
1175 phy->identify.target_port_protocols =
1176 SAS_PROTOCOL_SSP;
1177 else if (phy->identify.device_type != SAS_PHY_UNUSED)
1178 phy->identify.target_port_protocols =
1179 SAS_PROTOCOL_SMP;
1180 }
1181
1182 phy->port_id = port_id;
1183 phy->phy_attached = 1;
1184 queue_work(hisi_hba->wq, &phy->phyup_ws);
1185
1186 end:
1187 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1188 CHL_INT0_SL_PHY_ENABLE_MSK);
1189 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
1190
1191 return res;
1192 }
1193
1194 static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1195 {
1196 u32 phy_state, sl_ctrl, txid_auto;
1197 struct device *dev = hisi_hba->dev;
1198
1199 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
1200
1201 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1202 dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
1203 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
1204
1205 sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
1206 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
1207 sl_ctrl&(~SL_CTA_MSK));
1208
1209 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
1210 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
1211 txid_auto | CT3_MSK);
1212
1213 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
1214 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
1215
1216 return 0;
1217 }
1218
/*
 * Handle a BROADCAST primitive received on @phy_no: mask the broadcast
 * check interrupt, forward a PORTE_BROADCAST_RCVD event to libsas
 * (which triggers expander revalidation), then ack and unmask the
 * interrupt source.
 */
static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
1231
/*
 * Shared hard-irq handler for phy up / phy down / broadcast events.
 *
 * CHNL_INT_STATUS packs one nibble per phy; bit 0 of each nibble
 * (mask 0x11111111) signals a CHL_INT0 event for that phy. For each
 * flagged phy, the current PHY_STATE bit decides whether to treat the
 * event as phy-up/broadcast or phy-down.
 *
 * Return: IRQ_HANDLED if any sub-handler consumed an event, else
 * IRQ_NONE.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					phy_bcast_v3_hw(phy_no, hisi_hba);
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		/* advance to the next phy's nibble */
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}
1271
/* Per-port CHL_INT1 AXI bus error bits and their log strings. */
static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
};
1290
/*
 * Hard-irq handler for per-channel (per-phy) CHL_INT0/1/2 sources.
 *
 * CHNL_INT_STATUS packs one nibble per phy; bits 1..3 of each nibble
 * (mask 0xeeeeeeee) select CHL_INT0/1/2 events respectively. The ENT95
 * source is masked in ENT_INT_SRC_MSK3 for the duration and restored on
 * exit. CHL_INT1 AXI errors are logged and schedule a controller reset;
 * CHL_INT2 and the remaining CHL_INT0 bits are simply acked (the
 * phy up/down/bcast CHL_INT0 bits are left for their own handler).
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	/* temporarily mask ENT95 while servicing channel interrupts */
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0xeeeeeeee;

	while (irq_msk) {
		u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT0);
		u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT1);
		u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT2);

		if ((irq_msk & (4 << (phy_no * 4))) &&
						irq_value1) {
			int i;

			/* AXI bus error: log and schedule a host reset */
			for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
				const struct hisi_sas_hw_error *error =
						&port_axi_error[i];

				if (!(irq_value1 & error->irq_msk))
					continue;

				dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
					error->msg, phy_no, irq_value1);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}

			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT1, irq_value1);
		}

		if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT2, irq_value2);


		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
			/* ack CHL_INT0, but leave the phy up/down/bcast
			 * bits for int_phy_up_down_bcast_v3_hw() */
			hisi_sas_phy_write32(hisi_hba, phy_no,
					CHL_INT0, irq_value0
					& (~CHL_INT0_SL_RX_BCST_ACK_MSK)
					& (~CHL_INT0_SL_PHY_ENABLE_MSK)
					& (~CHL_INT0_NOT_RDY_MSK));
		}
		irq_msk &= ~(0xe << (phy_no * 4));
		phy_no++;
	}

	/* restore the original ENT_INT_SRC_MSK3 value */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}
1354
/* HGC_AXI_FIFO_ERR_INFO AXI error bits (sub-table of fatal_axi_error);
 * terminated by an empty entry. */
static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{},
};
1366
/* HGC_AXI_FIFO_ERR_INFO FIFO error bits (sub-table of fatal_axi_error);
 * terminated by an empty entry. */
static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9),  .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{},
};
1375
/* ENT_INT_SRC3 fatal error sources. Entries with .sub point at a
 * second-level table decoded from the register named in .reg. */
static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
		.msg = "LM add/fetch list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
		.msg = "SAS_HGC_ABT fetch LM list",
	},
};
1408
/*
 * Hard-irq handler for fatal AXI/queue errors (ENT_INT_SRC3).
 *
 * Temporarily masks the fatal sources (0x1df00), walks the
 * fatal_axi_error table (descending into the sub-tables for AXI/FIFO
 * error registers), logging each hit and scheduling a controller
 * reset. Also completes a pending ITCT-clear wait if the ITC interrupt
 * fired. Finally acks the handled sources and restores the mask.
 */
static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
{
	u32 irq_value, irq_msk;
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	int i;

	/* mask the fatal sources while handling them */
	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);

	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);

	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		if (error->sub) {
			/* decode the detail register via the sub-table */
			const struct hisi_sas_hw_error *sub = error->sub;
			u32 err_value = hisi_sas_read32(hisi_hba, error->reg);

			for (; sub->msk || sub->msg; sub++) {
				if (!(err_value & sub->msk))
					continue;

				dev_warn(dev, "%s error (0x%x) found!\n",
					sub->msg, irq_value);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}
		} else {
			dev_warn(dev, "%s error (0x%x) found!\n",
				error->msg, irq_value);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}

	/* an ITCT clear finished: wake up the waiter in free_device */
	if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		u32 dev_id = reg_val & ITCT_DEV_MSK;
		struct hisi_sas_device *sas_dev =
				&hisi_hba->devices[dev_id];

		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
		complete(sas_dev->completion);
	}

	/* ack handled sources and restore the original mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}
1462
/*
 * Translate a hardware error record into libsas task status for a slot
 * that completed with error.
 *
 * Reads the per-slot error record from the status buffer and maps it,
 * per protocol, onto ts->stat: data underrun (residual taken from
 * trans_tx_fail_type), IO-in-target (marks slot->abort so the caller
 * escalates), or a generic open-reject. STP/SATA errors additionally
 * run the normal SATA completion to fill in the D2H FIS result.
 */
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
			hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = record->dma_rx_err_type;
	u32 trans_tx_fail_type = record->trans_tx_fail_type;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			/* IO still in target: flag for abort escalation */
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}
1512
/*
 * Complete one command slot: translate the hardware completion header
 * into libsas task status, free the slot, and run the task's done
 * callback.
 *
 * Handles, in order: already-aborted tasks (freed without callback),
 * missing device, internal-abort status codes (mapped to TMF results),
 * erroneous completions (delegated to slot_err_v3_hw, possibly
 * returning early with slot->abort set for escalation), and finally
 * per-protocol success processing (SSP response IU, SMP response copy
 * back into the caller's sg page, SATA D2H FIS).
 *
 * Return: the resulting ts->stat, -EINVAL for a bad slot, or -1 when
 * the task was already aborted.
 */
static int
slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	enum exec_status sts;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	int aborted;
	unsigned long flags;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	sas_dev = device->lldd_dev;

	/* atomically test-and-clear the task's pending state */
	spin_lock_irqsave(&task->task_state_lock, flags);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;
	if (unlikely(aborted)) {
		/* task already aborted: free the slot, skip task_done */
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_task_free(hisi_hba, task, slot);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		return -1;
	}

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
			>> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		slot_err_v3_hw(hisi_hba, task, slot);
		if (unlikely(slot->abort))
			/* escalation path: caller aborts; slot not freed */
			return ts->stat;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		/* SSP response IU follows the error record in the buffer */
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		/* unmap both SMP buffers, then copy the response back
		 * into the caller's page */
		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
			hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_err(dev, "slot complete: port %d has removed\n",
			slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	/* snapshot stat before task_done may recycle the task */
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}
1650
/*
 * Tasklet draining one completion queue.
 *
 * Walks the CQ from the cached read pointer to the hardware write
 * pointer, completing each entry. NCQ completions are special: one CQ
 * entry may complete several SATA commands, whose IPTTs are packed in
 * the device's ITCT entry (12 bits per tag, 5 tags per 64-bit qword in
 * qw4_15) and selected by the 'act' bitmap. Finally the read pointer is
 * written back to hardware. Serialised against delivery via dq->lock.
 */
static void cq_tasklet_v3_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_itct *itct;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point, dev_id;
	int queue = cq->id;
	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

	complete_queue = hisi_hba->complete_hdr[queue];

	spin_lock(&dq->lock);
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				/* ffs() is 1-based; adjust to a bit index,
				 * then pull the 12-bit IPTT for that tag */
				ncq_tag_count -= 1;
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v3_hw(hisi_hba, slot);

				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			/* plain single-command completion */
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		}

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	spin_unlock(&dq->lock);
}
1716
/*
 * Hard-irq handler for a completion queue: ack the queue's OQ interrupt
 * source and defer the actual draining to the per-queue tasklet.
 */
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	tasklet_schedule(&cq->tasklet);

	return IRQ_HANDLED;
}
1729
/*
 * Allocate MSI vectors and wire up all interrupt handlers.
 *
 * Fixed vector layout: 1 = phy up/down/bcast, 2 = channel, 11 = fatal
 * AXI, 16+i = completion queue i. Requires the full HISI_SAS_MSI_COUNT_V3_HW
 * complement of vectors; partial allocation is treated as failure.
 * Also initialises one tasklet per completion queue.
 *
 * NOTE(review): the error path calls free_irq() on IRQs requested with
 * devm_request_irq(); the devm core will release them again at device
 * detach — looks like a double-release hazard, confirm against the
 * managed-device API.
 *
 * Return: 0 on success, -ENOENT on any allocation/request failure.
 */
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int vectors, rc;
	int i, k;
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW;

	vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
					max_msi, PCI_IRQ_MSI);
	if (vectors < max_msi) {
		dev_err(dev, "could not allocate all msi (%d)\n", vectors);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_irq_vectors;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_phy_irq;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_chnl_interrupt;
	}

	/* Init tasklets for cq only */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct tasklet_struct *t = &cq->tasklet;

		rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
					  cq_interrupt_v3_hw, 0,
					  DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev,
				"could not request cq%d interrupt, rc=%d\n",
				i, rc);
			rc = -ENOENT;
			goto free_cq_irqs;
		}

		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
	}

	return 0;

free_cq_irqs:
	/* unwind only the cq irqs requested so far */
	for (k = 0; k < i; k++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[k];

		free_irq(pci_irq_vector(pdev, k+16), cq);
	}
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
free_chnl_interrupt:
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_phy_irq:
	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}
1808
/*
 * Controller init entry point: bring the hardware up, then hook up the
 * interrupt vectors and completion-queue tasklets.
 *
 * Return: 0 on success, or the first failing step's error code.
 */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (!rc)
		rc = interrupt_init_v3_hw(hisi_hba);

	return rc;
}
1823
1824 static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
1825 struct sas_phy_linkrates *r)
1826 {
1827 u32 prog_phy_link_rate =
1828 hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
1829 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1830 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1831 int i;
1832 enum sas_linkrate min, max;
1833 u32 rate_mask = 0;
1834
1835 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1836 max = sas_phy->phy->maximum_linkrate;
1837 min = r->minimum_linkrate;
1838 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1839 max = r->maximum_linkrate;
1840 min = sas_phy->phy->minimum_linkrate;
1841 } else
1842 return;
1843
1844 sas_phy->phy->maximum_linkrate = max;
1845 sas_phy->phy->minimum_linkrate = min;
1846
1847 min -= SAS_LINK_RATE_1_5_GBPS;
1848 max -= SAS_LINK_RATE_1_5_GBPS;
1849
1850 for (i = 0; i <= max; i++)
1851 rate_mask |= 1 << (i * 2);
1852
1853 prog_phy_link_rate &= ~0xff;
1854 prog_phy_link_rate |= rate_mask;
1855
1856 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
1857 prog_phy_link_rate);
1858
1859 phy_hard_reset_v3_hw(hisi_hba, phy_no);
1860 }
1861
/*
 * Quiesce all interrupt sources ahead of a soft reset.
 *
 * Waits for the phy/channel/fatal and per-cq handlers to finish
 * (synchronize_irq), masking each OQ source first, then masks the
 * global ENT/ECC sources and the per-phy not-ready, phy-enable and
 * broadcast-check interrupts.
 */
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	/* let in-flight handlers drain (vector layout: 1=phy, 2=chnl,
	 * 11=fatal, 16+i=cq i) */
	synchronize_irq(pci_irq_vector(pdev, 1));
	synchronize_irq(pci_irq_vector(pdev, 2));
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++) {
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
		synchronize_irq(pci_irq_vector(pdev, i + 16));
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}
1888
/* Snapshot the PHY_STATE register: a per-phy state bitmap (bit n tested
 * as "phy n up" by the phy up/down handlers above). */
static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
{
	return hisi_sas_read32(hisi_hba, PHY_STATE);
}
1893
1894 static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1895 {
1896 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1897 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1898 struct sas_phy *sphy = sas_phy->phy;
1899 u32 reg_value;
1900
1901 /* loss dword sync */
1902 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
1903 sphy->loss_of_dword_sync_count += reg_value;
1904
1905 /* phy reset problem */
1906 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
1907 sphy->phy_reset_problem_count += reg_value;
1908
1909 /* invalid dword */
1910 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
1911 sphy->invalid_dword_count += reg_value;
1912
1913 /* disparity err */
1914 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
1915 sphy->running_disparity_error_count += reg_value;
1916
1917 }
1918
/*
 * Soft-reset the controller.
 *
 * Sequence: disable interrupts and delivery queues, kill the cq
 * tasklets, stop all phys, assert the AXI master disable, poll until
 * the bus reports idle (AM_CURR_TRANS_RETURN == 0x3), re-initialise
 * host memory structures, and re-run full hardware init.
 *
 * Return: 0 on success, -ETIMEDOUT-style error from readl_poll_timeout
 * if the AXI bus never goes idle, or the error from hw_init_v3_hw().
 */
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;
	u32 status;

	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
	hisi_sas_kill_tasklets(hisi_hba);

	hisi_sas_stop_phys(hisi_hba);

	/* give in-flight traffic time to settle */
	mdelay(10);

	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);

	/* wait until bus idle */
	rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
		AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}
1947
/* v3 hardware ops vector consumed by the hisi_sas core layer. */
static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.hw_init = hisi_sas_v3_init,
	.setup_itct = setup_itct_v3_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.free_device = free_device_v3_hw,
	.sl_notify = sl_notify_v3_hw,
	.prep_ssp = prep_ssp_v3_hw,
	.prep_smp = prep_smp_v3_hw,
	.prep_stp = prep_ata_v3_hw,
	.prep_abort = prep_abort_v3_hw,
	.get_free_slot = get_free_slot_v3_hw,
	.start_delivery = start_delivery_v3_hw,
	.slot_complete = slot_complete_v3_hw,
	.phys_init = phys_init_v3_hw,
	.phy_start = start_phy_v3_hw,
	.phy_disable = disable_phy_v3_hw,
	.phy_hard_reset = phy_hard_reset_v3_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
	.phy_set_linkrate = phy_set_linkrate_v3_hw,
	.dereg_device = dereg_device_v3_hw,
	.soft_reset = soft_reset_v3_hw,
	.get_phys_state = get_phys_state_v3_hw,
	.get_events = phy_get_events_v3_hw,
};
1974
1975 static struct Scsi_Host *
1976 hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
1977 {
1978 struct Scsi_Host *shost;
1979 struct hisi_hba *hisi_hba;
1980 struct device *dev = &pdev->dev;
1981
1982 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
1983 if (!shost) {
1984 dev_err(dev, "shost alloc failed\n");
1985 return NULL;
1986 }
1987 hisi_hba = shost_priv(shost);
1988
1989 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
1990 hisi_hba->hw = &hisi_sas_v3_hw;
1991 hisi_hba->pci_dev = pdev;
1992 hisi_hba->dev = dev;
1993 hisi_hba->shost = shost;
1994 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1995
1996 timer_setup(&hisi_hba->timer, NULL, 0);
1997
1998 if (hisi_sas_get_fw_info(hisi_hba) < 0)
1999 goto err_out;
2000
2001 if (hisi_sas_alloc(hisi_hba, shost)) {
2002 hisi_sas_free(hisi_hba);
2003 goto err_out;
2004 }
2005
2006 return shost;
2007 err_out:
2008 scsi_host_put(shost);
2009 dev_err(dev, "shost alloc failed\n");
2010 return NULL;
2011 }
2012
2013 static int
2014 hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2015 {
2016 struct Scsi_Host *shost;
2017 struct hisi_hba *hisi_hba;
2018 struct device *dev = &pdev->dev;
2019 struct asd_sas_phy **arr_phy;
2020 struct asd_sas_port **arr_port;
2021 struct sas_ha_struct *sha;
2022 int rc, phy_nr, port_nr, i;
2023
2024 rc = pci_enable_device(pdev);
2025 if (rc)
2026 goto err_out;
2027
2028 pci_set_master(pdev);
2029
2030 rc = pci_request_regions(pdev, DRV_NAME);
2031 if (rc)
2032 goto err_out_disable_device;
2033
2034 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
2035 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
2036 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
2037 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
2038 dev_err(dev, "No usable DMA addressing method\n");
2039 rc = -EIO;
2040 goto err_out_regions;
2041 }
2042 }
2043
2044 shost = hisi_sas_shost_alloc_pci(pdev);
2045 if (!shost) {
2046 rc = -ENOMEM;
2047 goto err_out_regions;
2048 }
2049
2050 sha = SHOST_TO_SAS_HA(shost);
2051 hisi_hba = shost_priv(shost);
2052 dev_set_drvdata(dev, sha);
2053
2054 hisi_hba->regs = pcim_iomap(pdev, 5, 0);
2055 if (!hisi_hba->regs) {
2056 dev_err(dev, "cannot map register.\n");
2057 rc = -ENOMEM;
2058 goto err_out_ha;
2059 }
2060
2061 phy_nr = port_nr = hisi_hba->n_phy;
2062
2063 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2064 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2065 if (!arr_phy || !arr_port) {
2066 rc = -ENOMEM;
2067 goto err_out_ha;
2068 }
2069
2070 sha->sas_phy = arr_phy;
2071 sha->sas_port = arr_port;
2072 sha->core.shost = shost;
2073 sha->lldd_ha = hisi_hba;
2074
2075 shost->transportt = hisi_sas_stt;
2076 shost->max_id = HISI_SAS_MAX_DEVICES;
2077 shost->max_lun = ~0;
2078 shost->max_channel = 1;
2079 shost->max_cmd_len = 16;
2080 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2081 shost->can_queue = hisi_hba->hw->max_command_entries;
2082 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2083
2084 sha->sas_ha_name = DRV_NAME;
2085 sha->dev = dev;
2086 sha->lldd_module = THIS_MODULE;
2087 sha->sas_addr = &hisi_hba->sas_addr[0];
2088 sha->num_phys = hisi_hba->n_phy;
2089 sha->core.shost = hisi_hba->shost;
2090
2091 for (i = 0; i < hisi_hba->n_phy; i++) {
2092 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2093 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2094 }
2095
2096 hisi_sas_init_add(hisi_hba);
2097
2098 rc = scsi_add_host(shost, dev);
2099 if (rc)
2100 goto err_out_ha;
2101
2102 rc = sas_register_ha(sha);
2103 if (rc)
2104 goto err_out_register_ha;
2105
2106 rc = hisi_hba->hw->hw_init(hisi_hba);
2107 if (rc)
2108 goto err_out_register_ha;
2109
2110 scsi_scan_host(shost);
2111
2112 return 0;
2113
2114 err_out_register_ha:
2115 scsi_remove_host(shost);
2116 err_out_ha:
2117 scsi_host_put(shost);
2118 err_out_regions:
2119 pci_release_regions(pdev);
2120 err_out_disable_device:
2121 pci_disable_device(pdev);
2122 err_out:
2123 return rc;
2124 }
2125
2126 static void
2127 hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
2128 {
2129 int i;
2130
2131 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
2132 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
2133 free_irq(pci_irq_vector(pdev, 11), hisi_hba);
2134 for (i = 0; i < hisi_hba->queue_count; i++) {
2135 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2136
2137 free_irq(pci_irq_vector(pdev, i+16), cq);
2138 }
2139 pci_free_irq_vectors(pdev);
2140 }
2141
2142 static void hisi_sas_v3_remove(struct pci_dev *pdev)
2143 {
2144 struct device *dev = &pdev->dev;
2145 struct sas_ha_struct *sha = dev_get_drvdata(dev);
2146 struct hisi_hba *hisi_hba = sha->lldd_ha;
2147 struct Scsi_Host *shost = sha->core.shost;
2148
2149 sas_unregister_ha(sha);
2150 sas_remove_host(sha->core.shost);
2151
2152 hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
2153 hisi_sas_kill_tasklets(hisi_hba);
2154 pci_release_regions(pdev);
2155 pci_disable_device(pdev);
2156 hisi_sas_free(hisi_hba);
2157 scsi_host_put(shost);
2158 }
2159
enum {
	/* instances (silicon revisions) of the controller this driver knows */
	hip08,
};
2164
2165 static const struct pci_device_id sas_v3_pci_table[] = {
2166 { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
2167 {}
2168 };
2169
/* PCI driver glue: binds the v3 probe/remove paths to matching devices */
static struct pci_driver sas_v3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sas_v3_pci_table,
	.probe		= hisi_sas_v3_probe,
	.remove		= hisi_sas_v3_remove,
};
2176
2177 module_pci_driver(sas_v3_pci_driver);
2178
2179 MODULE_LICENSE("GPL");
2180 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2181 MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
2182 MODULE_ALIAS("platform:" DRV_NAME);