]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
scsi: hisi_sas: add v3 code to send ATA frame
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / hisi_sas / hisi_sas_v3_hw.c
CommitLineData
92f61e3b
JG
1/*
2 * Copyright (c) 2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 */
10
11#include "hisi_sas.h"
12#define DRV_NAME "hisi_sas_v3_hw"
13
c94d8ca2
XC
/* global registers which need init */
15#define DLVRY_QUEUE_ENABLE 0x0
16#define IOST_BASE_ADDR_LO 0x8
17#define IOST_BASE_ADDR_HI 0xc
18#define ITCT_BASE_ADDR_LO 0x10
19#define ITCT_BASE_ADDR_HI 0x14
20#define IO_BROKEN_MSG_ADDR_LO 0x18
21#define IO_BROKEN_MSG_ADDR_HI 0x1c
3975f605
XC
22#define PHY_CONTEXT 0x20
23#define PHY_STATE 0x24
24#define PHY_PORT_NUM_MA 0x28
25#define PHY_CONN_RATE 0x30
c94d8ca2
XC
26#define AXI_AHB_CLK_CFG 0x3c
27#define AXI_USER1 0x48
28#define AXI_USER2 0x4c
29#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
30#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
31#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
32#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
33#define CFG_MAX_TAG 0x68
34#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
35#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
36#define HGC_GET_ITV_TIME 0x90
37#define DEVICE_MSG_WORK_MODE 0x94
38#define OPENA_WT_CONTI_TIME 0x9c
39#define I_T_NEXUS_LOSS_TIME 0xa0
40#define MAX_CON_TIME_LIMIT_TIME 0xa4
41#define BUS_INACTIVE_LIMIT_TIME 0xa8
42#define REJECT_TO_OPEN_LIMIT_TIME 0xac
43#define CFG_AGING_TIME 0xbc
44#define HGC_DFX_CFG2 0xc0
45#define CFG_ABT_SET_QUERY_IPTT 0xd4
46#define CFG_SET_ABORTED_IPTT_OFF 0
47#define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
48#define CFG_1US_TIMER_TRSH 0xcc
3975f605 49#define CHNL_INT_STATUS 0x148
c94d8ca2
XC
50#define INT_COAL_EN 0x19c
51#define OQ_INT_COAL_TIME 0x1a0
52#define OQ_INT_COAL_CNT 0x1a4
53#define ENT_INT_COAL_TIME 0x1a8
54#define ENT_INT_COAL_CNT 0x1ac
55#define OQ_INT_SRC 0x1b0
56#define OQ_INT_SRC_MSK 0x1b4
57#define ENT_INT_SRC1 0x1b8
58#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
59#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
60#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
61#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
62#define ENT_INT_SRC2 0x1bc
63#define ENT_INT_SRC3 0x1c0
64#define ENT_INT_SRC3_WP_DEPTH_OFF 8
65#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
66#define ENT_INT_SRC3_RP_DEPTH_OFF 10
67#define ENT_INT_SRC3_AXI_OFF 11
68#define ENT_INT_SRC3_FIFO_OFF 12
69#define ENT_INT_SRC3_LM_OFF 14
70#define ENT_INT_SRC3_ITC_INT_OFF 15
71#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
72#define ENT_INT_SRC3_ABT_OFF 16
73#define ENT_INT_SRC_MSK1 0x1c4
74#define ENT_INT_SRC_MSK2 0x1c8
75#define ENT_INT_SRC_MSK3 0x1cc
3975f605 76#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
c94d8ca2
XC
77#define CHNL_PHYUPDOWN_INT_MSK 0x1d0
78#define CHNL_ENT_INT_MSK 0x1d4
79#define HGC_COM_INT_MSK 0x1d8
3975f605 80#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
c94d8ca2
XC
81#define SAS_ECC_INTR 0x1e8
82#define SAS_ECC_INTR_MSK 0x1ec
83#define HGC_ERR_STAT_EN 0x238
84#define DLVRY_Q_0_BASE_ADDR_LO 0x260
85#define DLVRY_Q_0_BASE_ADDR_HI 0x264
86#define DLVRY_Q_0_DEPTH 0x268
87#define DLVRY_Q_0_WR_PTR 0x26c
88#define DLVRY_Q_0_RD_PTR 0x270
89#define HYPER_STREAM_ID_EN_CFG 0xc80
90#define OQ0_INT_SRC_MSK 0xc90
91#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
92#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
93#define COMPL_Q_0_DEPTH 0x4e8
94#define COMPL_Q_0_WR_PTR 0x4ec
95#define COMPL_Q_0_RD_PTR 0x4f0
96#define AWQOS_AWCACHE_CFG 0xc84
97#define ARQOS_ARCACHE_CFG 0xc88
98
99/* phy registers requiring init */
100#define PORT_BASE (0x2000)
3975f605
XC
101#define PHY_CFG (PORT_BASE + 0x0)
102#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
103#define PHY_CFG_ENA_OFF 0
104#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
105#define PHY_CFG_DC_OPT_OFF 2
106#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
c94d8ca2
XC
107#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
108#define PHY_CTRL (PORT_BASE + 0x14)
109#define PHY_CTRL_RESET_OFF 0
110#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
111#define SL_CFG (PORT_BASE + 0x84)
3975f605
XC
112#define SL_CONTROL (PORT_BASE + 0x94)
113#define SL_CONTROL_NOTIFY_EN_OFF 0
114#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
115#define SL_CTA_OFF 17
116#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
117#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
118#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
119#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
120#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
121#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
122#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
123#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
124#define TXID_AUTO (PORT_BASE + 0xb8)
125#define CT3_OFF 1
126#define CT3_MSK (0x1 << CT3_OFF)
127#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
c94d8ca2
XC
128#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
129#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
130#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
131#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
132#define CHL_INT0 (PORT_BASE + 0x1b4)
133#define CHL_INT0_HOTPLUG_TOUT_OFF 0
134#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
135#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
136#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
137#define CHL_INT0_SL_PHY_ENABLE_OFF 2
138#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
139#define CHL_INT0_NOT_RDY_OFF 4
140#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
141#define CHL_INT0_PHY_RDY_OFF 5
142#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
143#define CHL_INT1 (PORT_BASE + 0x1b8)
144#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
145#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
146#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
147#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
148#define CHL_INT2 (PORT_BASE + 0x1bc)
149#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
150#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
151#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
152#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
153#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
154#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
155#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
156#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
157#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
158#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
159
a2204723
XC
160/* HW dma structures */
161/* Delivery queue header */
162/* dw0 */
163#define CMD_HDR_RESP_REPORT_OFF 5
164#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
165#define CMD_HDR_TLR_CTRL_OFF 6
166#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
167#define CMD_HDR_PORT_OFF 18
168#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
169#define CMD_HDR_PRIORITY_OFF 27
170#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
171#define CMD_HDR_CMD_OFF 29
172#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
173/* dw1 */
ce60689e 174#define CMD_HDR_UNCON_CMD_OFF 3
a2204723
XC
175#define CMD_HDR_DIR_OFF 5
176#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
ce60689e
XC
177#define CMD_HDR_RESET_OFF 7
178#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
a2204723
XC
179#define CMD_HDR_VDTL_OFF 10
180#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
181#define CMD_HDR_FRAME_TYPE_OFF 11
182#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
183#define CMD_HDR_DEV_ID_OFF 16
184#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
185/* dw2 */
186#define CMD_HDR_CFL_OFF 0
187#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
ce60689e
XC
188#define CMD_HDR_NCQ_TAG_OFF 10
189#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
a2204723
XC
190#define CMD_HDR_MRFL_OFF 15
191#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
192#define CMD_HDR_SG_MOD_OFF 24
193#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
fa913de2
XC
194/* dw3 */
195#define CMD_HDR_IPTT_OFF 0
196#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
a2204723
XC
197/* dw6 */
198#define CMD_HDR_DIF_SGL_LEN_OFF 0
199#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
200#define CMD_HDR_DATA_SGL_LEN_OFF 16
201#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
202
60b4a5ee
XC
203/* Completion header */
204/* dw0 */
205#define CMPLT_HDR_CMPLT_OFF 0
206#define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
207#define CMPLT_HDR_ERROR_PHASE_OFF 2
208#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
209#define CMPLT_HDR_RSPNS_XFRD_OFF 10
210#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
211#define CMPLT_HDR_ERX_OFF 12
212#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
213#define CMPLT_HDR_ABORT_STAT_OFF 13
214#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
215/* abort_stat */
216#define STAT_IO_NOT_VALID 0x1
217#define STAT_IO_NO_DEVICE 0x2
218#define STAT_IO_COMPLETE 0x3
219#define STAT_IO_ABORTED 0x4
220/* dw1 */
221#define CMPLT_HDR_IPTT_OFF 0
222#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
223#define CMPLT_HDR_DEV_ID_OFF 16
224#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
225/* dw3 */
226#define CMPLT_HDR_IO_IN_TARGET_OFF 17
227#define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
228
c94d8ca2
XC
/*
 * Completion queue entry, one per completed command (written by HW,
 * little-endian).  Field meanings per the CMPLT_HDR_* masks above.
 */
struct hisi_sas_complete_v3_hdr {
	__le32 dw0;	/* completion status/error phase/abort status bits */
	__le32 dw1;	/* IPTT and device id of the completed command */
	__le32 act;	/* NCQ "active" bitmap for SATA? -- TODO confirm, named only */
	__le32 dw3;	/* incl. CMPLT_HDR_IO_IN_TARGET flag */
};
235
60b4a5ee
XC
/*
 * Per-slot error record filled in by the v3 hardware on command failure.
 * Layout is four little-endian dwords; dw2 is split into two 16-bit fields.
 */
struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;	/* transport-layer TX failure bits */

	/* dw1 */
	__le32 trans_rx_fail_type;	/* transport-layer RX failure bits
					 * (incl. RX_DATA_LEN_UNDERFLOW) */

	/* dw2 */
	__le16 dma_tx_err_type;		/* DMA TX error bits */
	__le16 sipc_rx_err_type;	/* SIPC RX error bits */

	/* dw3 */
	__le32 dma_rx_err_type;		/* DMA RX error bits */
};
250
251#define RX_DATA_LEN_UNDERFLOW_OFF 6
252#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
253
c94d8ca2 254#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
3975f605
XC
255#define HISI_SAS_MSI_COUNT_V3_HW 32
256
257enum {
258 HISI_SAS_PHY_PHY_UPDOWN,
259 HISI_SAS_PHY_CHNL_INT,
260 HISI_SAS_PHY_INT_NR
261};
c94d8ca2 262
a2204723
XC
263#define DIR_NO_DATA 0
264#define DIR_TO_INI 1
265#define DIR_TO_DEVICE 2
266#define DIR_RESERVED 3
267
ce60689e
XC
/*
 * ATA commands which the v3 hardware must send "unconstrained"
 * (CMD_HDR_UNCON_CMD is set in the delivery queue header for them).
 *
 * The macro argument is parenthesized at every use site so that an
 * expression argument (e.g. one containing == or ?:) cannot change
 * meaning through operator precedence.
 */
#define CMD_IS_UNCONSTRAINT(cmd) \
	(((cmd) == ATA_CMD_READ_LOG_EXT) || \
	((cmd) == ATA_CMD_READ_LOG_DMA_EXT) || \
	((cmd) == ATA_CMD_DEV_RESET))
272
54edeee1
XC
273static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
274{
275 void __iomem *regs = hisi_hba->regs + off;
276
277 return readl(regs);
278}
279
a2204723
XC
280static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
281{
282 void __iomem *regs = hisi_hba->regs + off;
283
284 return readl_relaxed(regs);
285}
286
c94d8ca2
XC
287static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
288{
289 void __iomem *regs = hisi_hba->regs + off;
290
291 writel(val, regs);
292}
293
294static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
295 u32 off, u32 val)
296{
297 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
298
299 writel(val, regs);
300}
301
3975f605
XC
302static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
303 int phy_no, u32 off)
304{
305 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
306
307 return readl(regs);
308}
309
c94d8ca2
XC
/*
 * One-time hardware register initialisation: global controller config,
 * per-phy config, delivery/completion queue bases, and the DMA addresses
 * of the IOST/ITCT/breakpoint/initial-FIS memories allocated elsewhere.
 * The magic values are hardware-specific defaults; do not reorder writes.
 */
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	/* Global registers init */
	/* Enable one delivery-queue bit per configured queue */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, AXI_USER1, 0x0);
	hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	/* Clear any pending interrupt sources (write-1-to-clear) */
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	/* Unmask phy up/down and channel/HGC interrupts */
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	/* Unmask each completion queue's interrupt */
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
		/* ack any stale channel interrupts (write-1-to-clear) */
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
				     0xa0064);
		hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
				     0xa0064);
	}
	/* Per-queue register sets are 0x14 bytes apart */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}
423
3975f605
XC
424static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
425{
426 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
427
428 cfg &= ~PHY_CFG_DC_OPT_MSK;
429 cfg |= 1 << PHY_CFG_DC_OPT_OFF;
430 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
431}
432
433static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
434{
435 struct sas_identify_frame identify_frame;
436 u32 *identify_buffer;
437
438 memset(&identify_frame, 0, sizeof(identify_frame));
439 identify_frame.dev_type = SAS_END_DEVICE;
440 identify_frame.frame_type = 0;
441 identify_frame._un1 = 1;
442 identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
443 identify_frame.target_bits = SAS_PROTOCOL_NONE;
444 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
445 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
446 identify_frame.phy_id = phy_no;
447 identify_buffer = (u32 *)(&identify_frame);
448
449 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
450 __swab32(identify_buffer[0]));
451 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
452 __swab32(identify_buffer[1]));
453 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
454 __swab32(identify_buffer[2]));
455 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
456 __swab32(identify_buffer[3]));
457 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
458 __swab32(identify_buffer[4]));
459 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
460 __swab32(identify_buffer[5]));
461}
462
c94d8ca2
XC
/*
 * Top-level v3 hardware init; currently only register setup is needed.
 * Returns 0 (no failure path yet).
 */
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	init_reg_v3_hw(hisi_hba);

	return 0;
}
469
3975f605
XC
470static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
471{
472 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
473
474 cfg |= PHY_CFG_ENA_MSK;
475 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
476}
477
/*
 * Bring one phy up: program its identify frame and DC-opt mode first,
 * then enable it (order matters -- config must precede enable).
 */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
484
485static void start_phys_v3_hw(struct hisi_hba *hisi_hba)
486{
487 int i;
488
489 for (i = 0; i < hisi_hba->n_phy; i++)
490 start_phy_v3_hw(hisi_hba, i);
491}
492
/* hisi_sas hw-ops entry point for phy init; just starts all phys. */
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	start_phys_v3_hw(hisi_hba);
}
497
498static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
499{
500 u32 sl_control;
501
502 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
503 sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
504 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
505 msleep(1);
506 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
507 sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
508 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
509}
510
a2204723
XC
511/**
512 * The callpath to this function and upto writing the write
513 * queue pointer should be safe from interruption.
514 */
515static int
516get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
517{
518 struct device *dev = hisi_hba->dev;
519 int queue = dq->id;
520 u32 r, w;
521
522 w = dq->wr_point;
523 r = hisi_sas_read32_relaxed(hisi_hba,
524 DLVRY_Q_0_RD_PTR + (queue * 0x14));
525 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
526 dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
527 queue, r, w);
528 return -EAGAIN;
529 }
530
531 return 0;
532}
533
534static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
535{
536 struct hisi_hba *hisi_hba = dq->hisi_hba;
537 int dlvry_queue = dq->slot_prep->dlvry_queue;
538 int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
539
540 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
541 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
542 dq->wr_point);
543}
544
545static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
546 struct hisi_sas_slot *slot,
547 struct hisi_sas_cmd_hdr *hdr,
548 struct scatterlist *scatter,
549 int n_elem)
550{
551 struct device *dev = hisi_hba->dev;
552 struct scatterlist *sg;
553 int i;
554
555 if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
556 dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
557 n_elem);
558 return -EINVAL;
559 }
560
561 slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
562 &slot->sge_page_dma);
563 if (!slot->sge_page)
564 return -ENOMEM;
565
566 for_each_sg(scatter, sg, n_elem, i) {
567 struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
568
569 entry->addr = cpu_to_le64(sg_dma_address(sg));
570 entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
571 entry->data_len = cpu_to_le32(sg_dma_len(sg));
572 entry->data_off = 0;
573 }
574
575 hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
576 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
577
578 return 0;
579}
580
/*
 * Fill the delivery-queue command header and command table for an SSP
 * task (normal command or task management function).
 *
 * @is_tmf: non-zero for a TMF; also used as the header priority bit.
 * @tmf: TMF parameters, only dereferenced when @is_tmf is set.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw().
 */
static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	/* dw0: response report + TLR control 2 + port + priority + cmd=1 */
	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	/* dw1: frame type 2 for TMF, 1 for command; data direction */
	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2: command frame length (in dwords), max response frame
	 * length, SG mode 2 */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	/* Build the SSP IU just past the frame header in the command table */
	buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
	memcpy(buf_cmd, ssp_task->LUN, 8);

	if (!is_tmf) {
		/* byte 9: task attribute/priority; CDB starts at byte 12 */
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* tag of the task to be managed, big-endian */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}
667
fa913de2
XC
/*
 * Prepare an SMP task: DMA-map the request and response buffers and fill
 * the delivery-queue command header.  On any error both mappings are
 * unwound via the goto cleanup chain.
 *
 * Return: 0 on success, -ENOMEM if mapping fails, -EINVAL if either
 * buffer length is not dword-aligned.
 */
static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	/* hardware requires dword-aligned request/response lengths */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2: (req_len - 4) / 4 = frame length in dwords excluding the
	 * trailing 4 bytes (CRC, presumably -- TODO confirm) */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}
737
ce60689e
XC
738static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
739{
740 struct ata_queued_cmd *qc = task->uldd_task;
741
742 if (qc) {
743 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
744 qc->tf.command == ATA_CMD_FPDMA_READ) {
745 *tag = qc->tag;
746 return 1;
747 }
748 }
749 return 0;
750}
751
/*
 * Fill the delivery-queue command header and command table (H2D FIS) for
 * a SATA/STP task.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw().
 */
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, rc = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	/* cmd field: 3 when behind an expander, 4 when direct-attached
	 * (presumably STP vs native SATA -- TODO confirm) */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* Flag a software reset (SRST) device-reset FIS */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		task->ata_task.fis.command, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) {
		/* NCQ tag lives in sector_count bits 7:3 of the FIS */
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
	       2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	buf_cmd = slot->command_table;

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}
832
54edeee1
XC
833static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
834{
835 int i, res = 0;
836 u32 context, port_id, link_rate, hard_phy_linkrate;
837 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
838 struct asd_sas_phy *sas_phy = &phy->sas_phy;
839 struct device *dev = hisi_hba->dev;
840
841 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
842
843 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
844 port_id = (port_id >> (4 * phy_no)) & 0xf;
845 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
846 link_rate = (link_rate >> (phy_no * 4)) & 0xf;
847
848 if (port_id == 0xf) {
849 dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
850 res = IRQ_NONE;
851 goto end;
852 }
853 sas_phy->linkrate = link_rate;
854 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
855 HARD_PHY_LINKRATE);
856 phy->maximum_linkrate = hard_phy_linkrate & 0xf;
857 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
858 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
859
860 /* Check for SATA dev */
861 context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
862 if (context & (1 << phy_no)) {
863 struct hisi_sas_initial_fis *initial_fis;
864 struct dev_to_host_fis *fis;
865 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
866
867 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
868 initial_fis = &hisi_hba->initial_fis[phy_no];
869 fis = &initial_fis->fis;
870 sas_phy->oob_mode = SATA_OOB_MODE;
871 attached_sas_addr[0] = 0x50;
872 attached_sas_addr[7] = phy_no;
873 memcpy(sas_phy->attached_sas_addr,
874 attached_sas_addr,
875 SAS_ADDR_SIZE);
876 memcpy(sas_phy->frame_rcvd, fis,
877 sizeof(struct dev_to_host_fis));
878 phy->phy_type |= PORT_TYPE_SATA;
879 phy->identify.device_type = SAS_SATA_DEV;
880 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
881 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
882 } else {
883 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
884 struct sas_identify_frame *id =
885 (struct sas_identify_frame *)frame_rcvd;
886
887 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
888 for (i = 0; i < 6; i++) {
889 u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
890 RX_IDAF_DWORD0 + (i * 4));
891 frame_rcvd[i] = __swab32(idaf);
892 }
893 sas_phy->oob_mode = SAS_OOB_MODE;
894 memcpy(sas_phy->attached_sas_addr,
895 &id->sas_addr,
896 SAS_ADDR_SIZE);
897 phy->phy_type |= PORT_TYPE_SAS;
898 phy->identify.device_type = id->dev_type;
899 phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
900 if (phy->identify.device_type == SAS_END_DEVICE)
901 phy->identify.target_port_protocols =
902 SAS_PROTOCOL_SSP;
903 else if (phy->identify.device_type != SAS_PHY_UNUSED)
904 phy->identify.target_port_protocols =
905 SAS_PROTOCOL_SMP;
906 }
907
908 phy->port_id = port_id;
909 phy->phy_attached = 1;
910 queue_work(hisi_hba->wq, &phy->phyup_ws);
911
912end:
913 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
914 CHL_INT0_SL_PHY_ENABLE_MSK);
915 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
916
917 return res;
918}
919
/*
 * Handle a phy-down event on @phy_no: report the link loss to the libsas
 * layer and reset the phy's port/connection configuration.
 *
 * NOTE(review): @res is never set, so this always returns 0 (IRQ_NONE);
 * the caller compares the return value against IRQ_HANDLED — confirm the
 * intended return convention.
 */
static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int res = 0;
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	/* Mask the not-ready interrupt while the event is processed. */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	/* Pass the phy's current ready bit down to the common layer. */
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
						sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
						txid_auto | CT3_MSK);

	/* Ack the not-ready interrupt source, then unmask it again. */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return res;
}
945
946static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
947{
948 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
949 struct asd_sas_phy *sas_phy = &phy->sas_phy;
950 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
951
952 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
953 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
954 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
955 CHL_INT0_SL_RX_BCST_ACK_MSK);
956 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
957}
958
/*
 * Interrupt handler for phy up/down/broadcast events.
 *
 * CHNL_INT_STATUS packs four status bits per phy; the 0x11111111 mask
 * keeps bit 0 of each nibble, which flags a pending CHL_INT0 event for
 * that phy. The loop shifts one nibble per iteration to walk the phys.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			/* Current link-ready state decides up vs down. */
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					phy_bcast_v3_hw(phy_no, hisi_hba);
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					/* NOTE(review): phy_down_v3_hw() always
					 * returns 0, so this comparison never
					 * sets res — confirm intent. */
					if (phy_down_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}
998
/*
 * Interrupt handler for per-channel (per-phy) error/status interrupts.
 *
 * The 0xeeeeeeee mask keeps bits 1-3 of each phy's nibble in
 * CHNL_INT_STATUS, which correspond to pending CHL_INT0/CHL_INT1/CHL_INT2
 * sources respectively (tested below as 2/4/8 << (phy_no * 4)).
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	/* Mask ENT95 while servicing; the original mask is restored on exit. */
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0xeeeeeeee;

	while (irq_msk) {
		u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT0);
		u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT1);
		u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT2);

		if ((irq_msk & (4 << (phy_no * 4))) &&
				irq_value1) {
			/* Unrecoverable DMA ECC corruption: halt the system. */
			if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
					  CHL_INT1_DMAC_TX_ECC_ERR_MSK))
				panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
					dev_name(dev), irq_value1);

			/* Write back the value to ack the CHL_INT1 sources. */
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT1, irq_value1);
		}

		if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT2, irq_value2);


		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
			/*
			 * Ack CHL_INT0 sources except hotplug timeout,
			 * phy-enable and not-ready, which are handled by
			 * the phy up/down/bcast interrupt handler.
			 */
			hisi_sas_phy_write32(hisi_hba, phy_no,
					CHL_INT0, irq_value0
					& (~CHL_INT0_HOTPLUG_TOUT_MSK)
					& (~CHL_INT0_SL_PHY_ENABLE_MSK)
					& (~CHL_INT0_NOT_RDY_MSK));
		}
		irq_msk &= ~(0xe << (phy_no * 4));
		phy_no++;
	}

	/* Restore the original interrupt mask. */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}
1053
60b4a5ee
XC
1054static void
1055slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
1056 struct hisi_sas_slot *slot)
1057{
1058 struct task_status_struct *ts = &task->task_status;
1059 struct hisi_sas_complete_v3_hdr *complete_queue =
1060 hisi_hba->complete_hdr[slot->cmplt_queue];
1061 struct hisi_sas_complete_v3_hdr *complete_hdr =
1062 &complete_queue[slot->cmplt_queue_slot];
1063 struct hisi_sas_err_record_v3 *record = slot->status_buffer;
1064 u32 dma_rx_err_type = record->dma_rx_err_type;
1065 u32 trans_tx_fail_type = record->trans_tx_fail_type;
1066
1067 switch (task->task_proto) {
1068 case SAS_PROTOCOL_SSP:
1069 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
1070 ts->residual = trans_tx_fail_type;
1071 ts->stat = SAS_DATA_UNDERRUN;
1072 } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
1073 ts->stat = SAS_QUEUE_FULL;
1074 slot->abort = 1;
1075 } else {
1076 ts->stat = SAS_OPEN_REJECT;
1077 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1078 }
1079 break;
1080 case SAS_PROTOCOL_SATA:
1081 case SAS_PROTOCOL_STP:
1082 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1083 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
1084 ts->residual = trans_tx_fail_type;
1085 ts->stat = SAS_DATA_UNDERRUN;
1086 } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
1087 ts->stat = SAS_PHY_DOWN;
1088 slot->abort = 1;
1089 } else {
1090 ts->stat = SAS_OPEN_REJECT;
1091 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1092 }
1093 hisi_sas_sata_done(task, slot);
1094 break;
1095 case SAS_PROTOCOL_SMP:
1096 ts->stat = SAM_STAT_CHECK_CONDITION;
1097 break;
1098 default:
1099 break;
1100 }
1101}
1102
/*
 * Complete one delivery slot: translate its completion header and status
 * buffer into libsas task status, free the slot, and invoke the task's
 * completion callback.
 *
 * Returns the SAS execution status, -EINVAL for a stale/invalid slot, or
 * -1 when the task had already been aborted.
 */
static int
slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	enum exec_status sts;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	int aborted;
	unsigned long flags;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	sas_dev = device->lldd_dev;

	/* Snapshot and clear the task's in-flight state under its lock. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;
	if (unlikely(aborted)) {
		/* Task was already aborted: just release the slot. */
		ts->stat = SAS_ABORTED_TASK;
		hisi_sas_slot_task_free(hisi_hba, task, slot);
		return -1;
	}

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
			>> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		slot_err_v3_hw(hisi_hba, task, slot);
		if (unlikely(slot->abort))
			/* Keep the slot alive for the abort path. */
			return ts->stat;
		goto out;
	}

	/* Successful completion: fill in per-protocol response data. */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		/* The response IU follows the error record in the buffer. */
		struct ssp_response_iu *iu = slot->status_buffer +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		/* Copy the SMP response past the leading error record. */
		memcpy(to + sg_resp->offset,
		       slot->status_buffer +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_err(dev, "slot complete: port %d has removed\n",
			slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	/* Read the status before task_done() may recycle the task. */
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}
1237
/*
 * Tasklet that drains one completion queue: walks entries from rd_point
 * up to the hardware write pointer, completes each referenced slot, and
 * writes the new read pointer back to the controller.
 */
static void cq_tasklet_v3_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_itct *itct;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point, dev_id;
	int queue = cq->id;
	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

	complete_queue = hisi_hba->complete_hdr[queue];

	spin_lock(&dq->lock);
	/* Each queue's pointer registers are 0x14 bytes apart. */
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			/* One act bit per completed NCQ command. */
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				ncq_tag_count -= 1;
				/*
				 * Tags are packed 12 bits wide, five per
				 * 64-bit qword of itct->qw4_15.
				 */
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v3_hw(hisi_hba, slot);

				/* Clear this tag's bit and find the next. */
				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			/* Non-NCQ: the entry names the slot tag directly. */
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		}

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	spin_unlock(&dq->lock);
}
1303
1304static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
1305{
1306 struct hisi_sas_cq *cq = p;
1307 struct hisi_hba *hisi_hba = cq->hisi_hba;
1308 int queue = cq->id;
1309
1310 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
1311
1312 tasklet_schedule(&cq->tasklet);
1313
1314 return IRQ_HANDLED;
1315}
1316
54edeee1
XC
1317static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
1318{
1319 struct device *dev = hisi_hba->dev;
1320 struct pci_dev *pdev = hisi_hba->pci_dev;
1321 int vectors, rc;
60b4a5ee 1322 int i, k;
54edeee1
XC
1323 int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
1324
1325 vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
1326 max_msi, PCI_IRQ_MSI);
1327 if (vectors < max_msi) {
1328 dev_err(dev, "could not allocate all msi (%d)\n", vectors);
1329 return -ENOENT;
1330 }
1331
1332 rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
1333 int_phy_up_down_bcast_v3_hw, 0,
1334 DRV_NAME " phy", hisi_hba);
1335 if (rc) {
1336 dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
1337 rc = -ENOENT;
1338 goto free_irq_vectors;
1339 }
1340
1341 rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
1342 int_chnl_int_v3_hw, 0,
1343 DRV_NAME " channel", hisi_hba);
1344 if (rc) {
1345 dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
1346 rc = -ENOENT;
1347 goto free_phy_irq;
1348 }
1349
60b4a5ee
XC
1350 /* Init tasklets for cq only */
1351 for (i = 0; i < hisi_hba->queue_count; i++) {
1352 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1353 struct tasklet_struct *t = &cq->tasklet;
1354
1355 rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
1356 cq_interrupt_v3_hw, 0,
1357 DRV_NAME " cq", cq);
1358 if (rc) {
1359 dev_err(dev,
1360 "could not request cq%d interrupt, rc=%d\n",
1361 i, rc);
1362 rc = -ENOENT;
1363 goto free_cq_irqs;
1364 }
1365
1366 tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
1367 }
54edeee1
XC
1368
1369 return 0;
1370
60b4a5ee
XC
1371free_cq_irqs:
1372 for (k = 0; k < i; k++) {
1373 struct hisi_sas_cq *cq = &hisi_hba->cq[k];
1374
1375 free_irq(pci_irq_vector(pdev, k+16), cq);
1376 }
1377 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
54edeee1
XC
1378free_phy_irq:
1379 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
1380free_irq_vectors:
1381 pci_free_irq_vectors(pdev);
1382 return rc;
1383}
1384
c94d8ca2
XC
/*
 * hw_init hook: bring up the v3 hardware, then wire up its interrupts.
 * Returns 0 on success or the first failing step's error code.
 */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc = hw_init_v3_hw(hisi_hba);

	if (rc)
		return rc;

	return interrupt_init_v3_hw(hisi_hba);
}
1399
e21fe3a5 1400static const struct hisi_sas_hw hisi_sas_v3_hw = {
c94d8ca2
XC
1401 .hw_init = hisi_sas_v3_init,
1402 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
1403 .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
3975f605 1404 .sl_notify = sl_notify_v3_hw,
a2204723 1405 .prep_ssp = prep_ssp_v3_hw,
fa913de2 1406 .prep_smp = prep_smp_v3_hw,
ce60689e 1407 .prep_stp = prep_ata_v3_hw,
a2204723
XC
1408 .get_free_slot = get_free_slot_v3_hw,
1409 .start_delivery = start_delivery_v3_hw,
1410 .slot_complete = slot_complete_v3_hw,
3975f605 1411 .phys_init = phys_init_v3_hw,
e21fe3a5
JG
1412};
1413
1414static struct Scsi_Host *
1415hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
1416{
1417 struct Scsi_Host *shost;
1418 struct hisi_hba *hisi_hba;
1419 struct device *dev = &pdev->dev;
1420
1421 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
1422 if (!shost)
1423 goto err_out;
1424 hisi_hba = shost_priv(shost);
1425
1426 hisi_hba->hw = &hisi_sas_v3_hw;
1427 hisi_hba->pci_dev = pdev;
1428 hisi_hba->dev = dev;
1429 hisi_hba->shost = shost;
1430 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1431
1432 init_timer(&hisi_hba->timer);
1433
1434 if (hisi_sas_get_fw_info(hisi_hba) < 0)
1435 goto err_out;
1436
1437 if (hisi_sas_alloc(hisi_hba, shost)) {
1438 hisi_sas_free(hisi_hba);
1439 goto err_out;
1440 }
1441
1442 return shost;
1443err_out:
1444 dev_err(dev, "shost alloc failed\n");
1445 return NULL;
1446}
1447
92f61e3b
JG
1448static int
1449hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1450{
e21fe3a5
JG
1451 struct Scsi_Host *shost;
1452 struct hisi_hba *hisi_hba;
1453 struct device *dev = &pdev->dev;
1454 struct asd_sas_phy **arr_phy;
1455 struct asd_sas_port **arr_port;
1456 struct sas_ha_struct *sha;
1457 int rc, phy_nr, port_nr, i;
1458
1459 rc = pci_enable_device(pdev);
1460 if (rc)
1461 goto err_out;
1462
1463 pci_set_master(pdev);
1464
1465 rc = pci_request_regions(pdev, DRV_NAME);
1466 if (rc)
1467 goto err_out_disable_device;
1468
1469 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
1470 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
1471 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
1472 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
1473 dev_err(dev, "No usable DMA addressing method\n");
1474 rc = -EIO;
1475 goto err_out_regions;
1476 }
1477 }
1478
1479 shost = hisi_sas_shost_alloc_pci(pdev);
1480 if (!shost) {
1481 rc = -ENOMEM;
1482 goto err_out_regions;
1483 }
1484
1485 sha = SHOST_TO_SAS_HA(shost);
1486 hisi_hba = shost_priv(shost);
1487 dev_set_drvdata(dev, sha);
1488
1489 hisi_hba->regs = pcim_iomap(pdev, 5, 0);
1490 if (!hisi_hba->regs) {
1491 dev_err(dev, "cannot map register.\n");
1492 rc = -ENOMEM;
1493 goto err_out_ha;
1494 }
1495
1496 phy_nr = port_nr = hisi_hba->n_phy;
1497
1498 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1499 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1500 if (!arr_phy || !arr_port) {
1501 rc = -ENOMEM;
1502 goto err_out_ha;
1503 }
1504
1505 sha->sas_phy = arr_phy;
1506 sha->sas_port = arr_port;
1507 sha->core.shost = shost;
1508 sha->lldd_ha = hisi_hba;
1509
1510 shost->transportt = hisi_sas_stt;
1511 shost->max_id = HISI_SAS_MAX_DEVICES;
1512 shost->max_lun = ~0;
1513 shost->max_channel = 1;
1514 shost->max_cmd_len = 16;
1515 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1516 shost->can_queue = hisi_hba->hw->max_command_entries;
1517 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
1518
1519 sha->sas_ha_name = DRV_NAME;
1520 sha->dev = dev;
1521 sha->lldd_module = THIS_MODULE;
1522 sha->sas_addr = &hisi_hba->sas_addr[0];
1523 sha->num_phys = hisi_hba->n_phy;
1524 sha->core.shost = hisi_hba->shost;
1525
1526 for (i = 0; i < hisi_hba->n_phy; i++) {
1527 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1528 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1529 }
1530
1531 hisi_sas_init_add(hisi_hba);
1532
1533 rc = scsi_add_host(shost, dev);
1534 if (rc)
1535 goto err_out_ha;
1536
1537 rc = sas_register_ha(sha);
1538 if (rc)
1539 goto err_out_register_ha;
1540
1541 rc = hisi_hba->hw->hw_init(hisi_hba);
1542 if (rc)
1543 goto err_out_register_ha;
1544
1545 scsi_scan_host(shost);
1546
92f61e3b 1547 return 0;
e21fe3a5
JG
1548
1549err_out_register_ha:
1550 scsi_remove_host(shost);
1551err_out_ha:
1552 kfree(shost);
1553err_out_regions:
1554 pci_release_regions(pdev);
1555err_out_disable_device:
1556 pci_disable_device(pdev);
1557err_out:
1558 return rc;
92f61e3b
JG
1559}
1560
54edeee1
XC
1561static void
1562hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
1563{
60b4a5ee
XC
1564 int i;
1565
54edeee1
XC
1566 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
1567 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
60b4a5ee
XC
1568 for (i = 0; i < hisi_hba->queue_count; i++) {
1569 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1570
1571 free_irq(pci_irq_vector(pdev, i+16), cq);
1572 }
54edeee1
XC
1573 pci_free_irq_vectors(pdev);
1574}
1575
92f61e3b
JG
/*
 * PCI remove: unwind probe — unregister from libsas/SCSI, release driver
 * resources and interrupts, then release PCI resources.
 */
static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	/*
	 * NOTE(review): driver memory is freed before the irqs are torn
	 * down — confirm no interrupt can fire in between and touch the
	 * freed structures.
	 */
	hisi_sas_free(hisi_hba);
	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1590
enum {
	/* instances of the controller */
	hip08,
};

/* PCI IDs this driver binds to (HiSilicon v3 SAS controller). */
static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};

static struct pci_driver sas_v3_pci_driver = {
	.name = DRV_NAME,
	.id_table = sas_v3_pci_table,
	.probe = hisi_sas_v3_probe,
	.remove = hisi_sas_v3_remove,
};

module_pci_driver(sas_v3_pci_driver);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
/*
 * NOTE(review): this is a PCI driver (module_pci_driver generates pci:
 * aliases from the id table), so a "platform:" alias looks left over from
 * the platform-device variants — confirm whether it should be dropped.
 */
MODULE_ALIAS("platform:" DRV_NAME);