]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
scsi: hisi_sas: add v3 code to send SSP frame
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / hisi_sas / hisi_sas_v3_hw.c
CommitLineData
92f61e3b
JG
1/*
2 * Copyright (c) 2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 */
10
11#include "hisi_sas.h"
12#define DRV_NAME "hisi_sas_v3_hw"
13
c94d8ca2
XC
14/* global registers need init*/
15#define DLVRY_QUEUE_ENABLE 0x0
16#define IOST_BASE_ADDR_LO 0x8
17#define IOST_BASE_ADDR_HI 0xc
18#define ITCT_BASE_ADDR_LO 0x10
19#define ITCT_BASE_ADDR_HI 0x14
20#define IO_BROKEN_MSG_ADDR_LO 0x18
21#define IO_BROKEN_MSG_ADDR_HI 0x1c
3975f605
XC
22#define PHY_CONTEXT 0x20
23#define PHY_STATE 0x24
24#define PHY_PORT_NUM_MA 0x28
25#define PHY_CONN_RATE 0x30
c94d8ca2
XC
26#define AXI_AHB_CLK_CFG 0x3c
27#define AXI_USER1 0x48
28#define AXI_USER2 0x4c
29#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
30#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
31#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
32#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
33#define CFG_MAX_TAG 0x68
34#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
35#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
36#define HGC_GET_ITV_TIME 0x90
37#define DEVICE_MSG_WORK_MODE 0x94
38#define OPENA_WT_CONTI_TIME 0x9c
39#define I_T_NEXUS_LOSS_TIME 0xa0
40#define MAX_CON_TIME_LIMIT_TIME 0xa4
41#define BUS_INACTIVE_LIMIT_TIME 0xa8
42#define REJECT_TO_OPEN_LIMIT_TIME 0xac
43#define CFG_AGING_TIME 0xbc
44#define HGC_DFX_CFG2 0xc0
45#define CFG_ABT_SET_QUERY_IPTT 0xd4
46#define CFG_SET_ABORTED_IPTT_OFF 0
47#define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
48#define CFG_1US_TIMER_TRSH 0xcc
3975f605 49#define CHNL_INT_STATUS 0x148
c94d8ca2
XC
50#define INT_COAL_EN 0x19c
51#define OQ_INT_COAL_TIME 0x1a0
52#define OQ_INT_COAL_CNT 0x1a4
53#define ENT_INT_COAL_TIME 0x1a8
54#define ENT_INT_COAL_CNT 0x1ac
55#define OQ_INT_SRC 0x1b0
56#define OQ_INT_SRC_MSK 0x1b4
57#define ENT_INT_SRC1 0x1b8
58#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
59#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
60#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
61#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
62#define ENT_INT_SRC2 0x1bc
63#define ENT_INT_SRC3 0x1c0
64#define ENT_INT_SRC3_WP_DEPTH_OFF 8
65#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
66#define ENT_INT_SRC3_RP_DEPTH_OFF 10
67#define ENT_INT_SRC3_AXI_OFF 11
68#define ENT_INT_SRC3_FIFO_OFF 12
69#define ENT_INT_SRC3_LM_OFF 14
70#define ENT_INT_SRC3_ITC_INT_OFF 15
71#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
72#define ENT_INT_SRC3_ABT_OFF 16
73#define ENT_INT_SRC_MSK1 0x1c4
74#define ENT_INT_SRC_MSK2 0x1c8
75#define ENT_INT_SRC_MSK3 0x1cc
3975f605 76#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
c94d8ca2
XC
77#define CHNL_PHYUPDOWN_INT_MSK 0x1d0
78#define CHNL_ENT_INT_MSK 0x1d4
79#define HGC_COM_INT_MSK 0x1d8
3975f605 80#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
c94d8ca2
XC
81#define SAS_ECC_INTR 0x1e8
82#define SAS_ECC_INTR_MSK 0x1ec
83#define HGC_ERR_STAT_EN 0x238
84#define DLVRY_Q_0_BASE_ADDR_LO 0x260
85#define DLVRY_Q_0_BASE_ADDR_HI 0x264
86#define DLVRY_Q_0_DEPTH 0x268
87#define DLVRY_Q_0_WR_PTR 0x26c
88#define DLVRY_Q_0_RD_PTR 0x270
89#define HYPER_STREAM_ID_EN_CFG 0xc80
90#define OQ0_INT_SRC_MSK 0xc90
91#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
92#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
93#define COMPL_Q_0_DEPTH 0x4e8
94#define COMPL_Q_0_WR_PTR 0x4ec
95#define COMPL_Q_0_RD_PTR 0x4f0
96#define AWQOS_AWCACHE_CFG 0xc84
97#define ARQOS_ARCACHE_CFG 0xc88
98
99/* phy registers requiring init */
100#define PORT_BASE (0x2000)
3975f605
XC
101#define PHY_CFG (PORT_BASE + 0x0)
102#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
103#define PHY_CFG_ENA_OFF 0
104#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
105#define PHY_CFG_DC_OPT_OFF 2
106#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
c94d8ca2
XC
107#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
108#define PHY_CTRL (PORT_BASE + 0x14)
109#define PHY_CTRL_RESET_OFF 0
110#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
111#define SL_CFG (PORT_BASE + 0x84)
3975f605
XC
112#define SL_CONTROL (PORT_BASE + 0x94)
113#define SL_CONTROL_NOTIFY_EN_OFF 0
114#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
115#define SL_CTA_OFF 17
116#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
117#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
118#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
119#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
120#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
121#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
122#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
123#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
124#define TXID_AUTO (PORT_BASE + 0xb8)
125#define CT3_OFF 1
126#define CT3_MSK (0x1 << CT3_OFF)
127#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
c94d8ca2
XC
128#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
129#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
130#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
131#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
132#define CHL_INT0 (PORT_BASE + 0x1b4)
133#define CHL_INT0_HOTPLUG_TOUT_OFF 0
134#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
135#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
136#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
137#define CHL_INT0_SL_PHY_ENABLE_OFF 2
138#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
139#define CHL_INT0_NOT_RDY_OFF 4
140#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
141#define CHL_INT0_PHY_RDY_OFF 5
142#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
143#define CHL_INT1 (PORT_BASE + 0x1b8)
144#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
145#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
146#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
147#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
148#define CHL_INT2 (PORT_BASE + 0x1bc)
149#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
150#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
151#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
152#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
153#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
154#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
155#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
156#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
157#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
158#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
159
a2204723
XC
160/* HW dma structures */
161/* Delivery queue header */
162/* dw0 */
163#define CMD_HDR_RESP_REPORT_OFF 5
164#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
165#define CMD_HDR_TLR_CTRL_OFF 6
166#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
167#define CMD_HDR_PORT_OFF 18
168#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
169#define CMD_HDR_PRIORITY_OFF 27
170#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
171#define CMD_HDR_CMD_OFF 29
172#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
173/* dw1 */
174#define CMD_HDR_DIR_OFF 5
175#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
176#define CMD_HDR_VDTL_OFF 10
177#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
178#define CMD_HDR_FRAME_TYPE_OFF 11
179#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
180#define CMD_HDR_DEV_ID_OFF 16
181#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
182/* dw2 */
183#define CMD_HDR_CFL_OFF 0
184#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
185#define CMD_HDR_MRFL_OFF 15
186#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
187#define CMD_HDR_SG_MOD_OFF 24
188#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
189/* dw6 */
190#define CMD_HDR_DIF_SGL_LEN_OFF 0
191#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
192#define CMD_HDR_DATA_SGL_LEN_OFF 16
193#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
194
60b4a5ee
XC
195/* Completion header */
196/* dw0 */
197#define CMPLT_HDR_CMPLT_OFF 0
198#define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
199#define CMPLT_HDR_ERROR_PHASE_OFF 2
200#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
201#define CMPLT_HDR_RSPNS_XFRD_OFF 10
202#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
203#define CMPLT_HDR_ERX_OFF 12
204#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
205#define CMPLT_HDR_ABORT_STAT_OFF 13
206#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
207/* abort_stat */
208#define STAT_IO_NOT_VALID 0x1
209#define STAT_IO_NO_DEVICE 0x2
210#define STAT_IO_COMPLETE 0x3
211#define STAT_IO_ABORTED 0x4
212/* dw1 */
213#define CMPLT_HDR_IPTT_OFF 0
214#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
215#define CMPLT_HDR_DEV_ID_OFF 16
216#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
217/* dw3 */
218#define CMPLT_HDR_IO_IN_TARGET_OFF 17
219#define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
220
c94d8ca2
XC
/*
 * v3 hw completion queue entry, one per completed command.
 * Field meanings for dw0/dw1/dw3 are given by the CMPLT_HDR_* masks above.
 */
struct hisi_sas_complete_v3_hdr {
	__le32 dw0;	/* completion status, error phase, abort status */
	__le32 dw1;	/* IPTT and device id */
	__le32 act;
	__le32 dw3;	/* includes IO_IN_TARGET flag */
};
227
60b4a5ee
XC
/*
 * Error record the hw writes to the slot status buffer for a command that
 * completed with error; consumed by slot_err_v3_hw().
 */
struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};
242
243#define RX_DATA_LEN_UNDERFLOW_OFF 6
244#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
245
c94d8ca2 246#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
3975f605
XC
247#define HISI_SAS_MSI_COUNT_V3_HW 32
248
249enum {
250 HISI_SAS_PHY_PHY_UPDOWN,
251 HISI_SAS_PHY_CHNL_INT,
252 HISI_SAS_PHY_INT_NR
253};
c94d8ca2 254
a2204723
XC
255#define DIR_NO_DATA 0
256#define DIR_TO_INI 1
257#define DIR_TO_DEVICE 2
258#define DIR_RESERVED 3
259
54edeee1
XC
260static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
261{
262 void __iomem *regs = hisi_hba->regs + off;
263
264 return readl(regs);
265}
266
a2204723
XC
267static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
268{
269 void __iomem *regs = hisi_hba->regs + off;
270
271 return readl_relaxed(regs);
272}
273
c94d8ca2
XC
274static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
275{
276 void __iomem *regs = hisi_hba->regs + off;
277
278 writel(val, regs);
279}
280
281static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
282 u32 off, u32 val)
283{
284 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
285
286 writel(val, regs);
287}
288
3975f605
XC
289static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
290 int phy_no, u32 off)
291{
292 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
293
294 return readl(regs);
295}
296
c94d8ca2
XC
/*
 * Program the v3 hw registers to their initial values and hand the chip the
 * DMA addresses of the host-memory structures (delivery/completion queues,
 * ITCT, IOST, breakpoint buffers and initial-FIS area).
 *
 * NOTE(review): most values written here are opaque hardware init constants
 * taken from the programming guide; they are not decoded further.
 */
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	/* Global registers init */

	/* Enable one delivery-queue bit per configured queue */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, AXI_USER1, 0x0);
	hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	/* Clear any stale interrupt sources (write-1-to-clear style writes) */
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	/* Unmask the completion-queue interrupt for every queue */
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff);

	/* Per-phy init: each phy register lives in its own 0x400 window */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
		/* Clear channel interrupt status, then set the masks */
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
				     0xa0064);
		hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
				     0xa0064);
	}
	/* Per-queue DMA bases and depths; each queue's registers are 0x14 apart */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}
410
3975f605
XC
411static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
412{
413 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
414
415 cfg &= ~PHY_CFG_DC_OPT_MSK;
416 cfg |= 1 << PHY_CFG_DC_OPT_OFF;
417 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
418}
419
420static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
421{
422 struct sas_identify_frame identify_frame;
423 u32 *identify_buffer;
424
425 memset(&identify_frame, 0, sizeof(identify_frame));
426 identify_frame.dev_type = SAS_END_DEVICE;
427 identify_frame.frame_type = 0;
428 identify_frame._un1 = 1;
429 identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
430 identify_frame.target_bits = SAS_PROTOCOL_NONE;
431 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
432 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
433 identify_frame.phy_id = phy_no;
434 identify_buffer = (u32 *)(&identify_frame);
435
436 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
437 __swab32(identify_buffer[0]));
438 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
439 __swab32(identify_buffer[1]));
440 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
441 __swab32(identify_buffer[2]));
442 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
443 __swab32(identify_buffer[3]));
444 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
445 __swab32(identify_buffer[4]));
446 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
447 __swab32(identify_buffer[5]));
448}
449
c94d8ca2
XC
/*
 * One-off controller bring-up; currently only programs the registers.
 * Returns 0 (no failure path yet).
 */
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	init_reg_v3_hw(hisi_hba);

	return 0;
}
456
3975f605
XC
457static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
458{
459 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
460
461 cfg |= PHY_CFG_ENA_MSK;
462 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
463}
464
/*
 * Bring up one phy: program its identify frame and optimisation mode
 * before enabling it, so the phy negotiates with the right settings.
 */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
471
/* Bring up every phy on the controller. */
static void start_phys_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		start_phy_v3_hw(hisi_hba, i);
}
479
/* hw ops entry point for phy initialisation; just starts all phys. */
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	start_phys_v3_hw(hisi_hba);
}
484
/*
 * Pulse the NOTIFY enable bit on a phy's SL_CONTROL register: set it,
 * give the hardware ~1ms to transmit, then clear it again.
 */
static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
497
a2204723
XC
498/**
499 * The callpath to this function and upto writing the write
500 * queue pointer should be safe from interruption.
501 */
502static int
503get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
504{
505 struct device *dev = hisi_hba->dev;
506 int queue = dq->id;
507 u32 r, w;
508
509 w = dq->wr_point;
510 r = hisi_sas_read32_relaxed(hisi_hba,
511 DLVRY_Q_0_RD_PTR + (queue * 0x14));
512 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
513 dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
514 queue, r, w);
515 return -EAGAIN;
516 }
517
518 return 0;
519}
520
521static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
522{
523 struct hisi_hba *hisi_hba = dq->hisi_hba;
524 int dlvry_queue = dq->slot_prep->dlvry_queue;
525 int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
526
527 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
528 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
529 dq->wr_point);
530}
531
/*
 * Build the hardware scatter-gather (PRD) table for a slot from the
 * task's scatterlist and point the command header at it.
 *
 * Allocates a SGE page from the DMA pool (GFP_ATOMIC — may be called in
 * interrupt/atomic context).
 *
 * Return: 0 on success, -EINVAL if @n_elem exceeds the per-command SGE
 * limit, -ENOMEM if the pool allocation fails.
 */
static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_slot *slot,
			      struct hisi_sas_cmd_hdr *hdr,
			      struct scatterlist *scatter,
			      int n_elem)
{
	struct device *dev = hisi_hba->dev;
	struct scatterlist *sg;
	int i;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		return -EINVAL;
	}

	slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
					&slot->sge_page_dma);
	if (!slot->sge_page)
		return -ENOMEM;

	/* Copy each DMA-mapped segment into a little-endian hw SGE entry */
	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &slot->sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);

	return 0;
}
567
/*
 * Fill in the delivery-queue command header and command table for an SSP
 * frame (normal command or task management function).
 *
 * @is_tmf selects a TASK frame (no data) instead of a COMMAND frame and
 * also gives the command high priority in dw0.
 *
 * Return: 0 on success, or the error from prep_prd_sge_v3_hw() if the
 * scatter-gather table could not be built.
 */
static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	/* dw0: response report + TLR control + port + priority + SSP cmd type */
	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	/* dw1: verify DMA transfer length; frame type and data direction */
	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2: command frame length and max response frame length in dwords */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	/* Command IU follows the SSP frame header in the command table */
	buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
	memcpy(buf_cmd, ssp_task->LUN, 8);

	if (!is_tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* Tag of the task being managed, big-endian split */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}
654
54edeee1
XC
/*
 * Handle a phy-up event on @phy_no: read the negotiated port id and link
 * rate, record the attached device's identity (SATA initial D2H FIS or
 * SAS IDENTIFY frame), and queue the libsas phy-up work.
 *
 * Returns IRQ_NONE when the hardware reports an invalid port id,
 * otherwise 0; in both cases the phy-enable interrupt is acknowledged
 * and re-enabled before returning.
 */
static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res = 0;
	u32 context, port_id, link_rate, hard_phy_linkrate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	/* Mask the phy-enable interrupt while handling this event */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	/* 4 bits per phy in both the port-number and conn-rate registers */
	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;
		sas_phy->oob_mode = SATA_OOB_MODE;
		/* Fabricate a SAS address for the SATA device from the phy no */
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		/* Read the received IDENTIFY frame, byte-swapping each dword */
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
						       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	/* Ack the phy-enable interrupt and unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}
741
/*
 * Handle a phy-down event on @phy_no: notify libsas (hisi_sas_phy_down),
 * clear the CTA bit in SL_CONTROL and set CT3 in TXID_AUTO, then
 * acknowledge and re-enable the not-ready interrupt.
 *
 * Always returns 0.
 */
static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int res = 0;
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	/* Mask the not-ready interrupt while handling this event */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | CT3_MSK);

	/* Ack the not-ready interrupt and unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return res;
}
767
/*
 * Handle a received BROADCAST primitive on @phy_no: report it to libsas,
 * then acknowledge the interrupt. The broadcast-check interrupt is masked
 * around the notification and re-enabled afterwards.
 */
static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
780
/*
 * IRQ handler for phy up/down and broadcast events.
 *
 * CHNL_INT_STATUS packs 4 status bits per phy; the 0x11111111 mask keeps
 * bit 0 of each phy's nibble, which flags a pending CHL_INT0 event for
 * that phy. For each flagged phy, the phy's CHL_INT0 value together with
 * PHY_STATE decides whether this is a phy-up, broadcast or phy-down event.
 *
 * Returns IRQ_HANDLED if any phy up/down handler handled an event,
 * IRQ_NONE otherwise.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					phy_bcast_v3_hw(phy_no, hisi_hba);
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		/* advance to the next phy's nibble */
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}
820
/*
 * IRQ handler for per-channel (per-phy) error interrupts.
 *
 * The ENT95 source is masked for the duration of the handler and the
 * original ENT_INT_SRC_MSK3 value restored on exit. The 0xeeeeeeee mask
 * selects bits 1-3 of each phy's nibble in CHNL_INT_STATUS, i.e. the
 * CHL_INT0/1/2 pending flags. Fatal DMAC ECC errors panic the system;
 * other sources are simply acknowledged by writing their status back.
 *
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;	/* saved for restore at the end */
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0xeeeeeeee;

	while (irq_msk) {
		u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT0);
		u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT1);
		u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT2);

		/* bit 2 of the phy's nibble: CHL_INT1 pending */
		if ((irq_msk & (4 << (phy_no * 4))) &&
				irq_value1) {
			if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
					  CHL_INT1_DMAC_TX_ECC_ERR_MSK))
				panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
					dev_name(dev), irq_value1);

			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT1, irq_value1);
		}

		/* bit 3 of the phy's nibble: CHL_INT2 pending — just ack */
		if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT2, irq_value2);


		/*
		 * bit 1 of the phy's nibble: CHL_INT0 pending — ack all
		 * sources except those owned by the phy up/down/bcast handler
		 */
		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT0, irq_value0
					     & (~CHL_INT0_HOTPLUG_TOUT_MSK)
					     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
					     & (~CHL_INT0_NOT_RDY_MSK));
		}
		irq_msk &= ~(0xe << (phy_no * 4));
		phy_no++;
	}

	/* restore the original interrupt mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}
875
60b4a5ee
XC
/*
 * Translate the hw error record of a failed slot into libsas task status.
 *
 * Reads the hisi_sas_err_record_v3 the hw wrote into the slot's status
 * buffer. For SSP and SATA/STP, a data-length underflow becomes
 * SAS_DATA_UNDERRUN with the residual taken from trans_tx_fail_type;
 * an IO still in the target sets slot->abort so the caller can abort it.
 */
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record = slot->status_buffer;
	u32 dma_rx_err_type = record->dma_rx_err_type;
	u32 trans_tx_fail_type = record->trans_tx_fail_type;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		/* fill in the ATA task response from the D2H FIS */
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}
924
925static int
926slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
927{
928 struct sas_task *task = slot->task;
929 struct hisi_sas_device *sas_dev;
930 struct device *dev = hisi_hba->dev;
931 struct task_status_struct *ts;
932 struct domain_device *device;
933 enum exec_status sts;
934 struct hisi_sas_complete_v3_hdr *complete_queue =
935 hisi_hba->complete_hdr[slot->cmplt_queue];
936 struct hisi_sas_complete_v3_hdr *complete_hdr =
937 &complete_queue[slot->cmplt_queue_slot];
938 int aborted;
939 unsigned long flags;
940
941 if (unlikely(!task || !task->lldd_task || !task->dev))
942 return -EINVAL;
943
944 ts = &task->task_status;
945 device = task->dev;
946 sas_dev = device->lldd_dev;
947
948 spin_lock_irqsave(&task->task_state_lock, flags);
949 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
950 task->task_state_flags &=
951 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
952 spin_unlock_irqrestore(&task->task_state_lock, flags);
953
954 memset(ts, 0, sizeof(*ts));
955 ts->resp = SAS_TASK_COMPLETE;
956 if (unlikely(aborted)) {
957 ts->stat = SAS_ABORTED_TASK;
958 hisi_sas_slot_task_free(hisi_hba, task, slot);
959 return -1;
960 }
961
962 if (unlikely(!sas_dev)) {
963 dev_dbg(dev, "slot complete: port has not device\n");
964 ts->stat = SAS_PHY_DOWN;
965 goto out;
966 }
967
968 /*
969 * Use SAS+TMF status codes
970 */
971 switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
972 >> CMPLT_HDR_ABORT_STAT_OFF) {
973 case STAT_IO_ABORTED:
974 /* this IO has been aborted by abort command */
975 ts->stat = SAS_ABORTED_TASK;
976 goto out;
977 case STAT_IO_COMPLETE:
978 /* internal abort command complete */
979 ts->stat = TMF_RESP_FUNC_SUCC;
980 goto out;
981 case STAT_IO_NO_DEVICE:
982 ts->stat = TMF_RESP_FUNC_COMPLETE;
983 goto out;
984 case STAT_IO_NOT_VALID:
985 /*
986 * abort single IO, the controller can't find the IO
987 */
988 ts->stat = TMF_RESP_FUNC_FAILED;
989 goto out;
990 default:
991 break;
992 }
993
994 /* check for erroneous completion */
995 if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
996 slot_err_v3_hw(hisi_hba, task, slot);
997 if (unlikely(slot->abort))
998 return ts->stat;
999 goto out;
1000 }
1001
1002 switch (task->task_proto) {
1003 case SAS_PROTOCOL_SSP: {
1004 struct ssp_response_iu *iu = slot->status_buffer +
1005 sizeof(struct hisi_sas_err_record);
1006
1007 sas_ssp_task_response(dev, task, iu);
1008 break;
1009 }
1010 case SAS_PROTOCOL_SMP: {
1011 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1012 void *to;
1013
1014 ts->stat = SAM_STAT_GOOD;
1015 to = kmap_atomic(sg_page(sg_resp));
1016
1017 dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
1018 DMA_FROM_DEVICE);
1019 dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
1020 DMA_TO_DEVICE);
1021 memcpy(to + sg_resp->offset,
1022 slot->status_buffer +
1023 sizeof(struct hisi_sas_err_record),
1024 sg_dma_len(sg_resp));
1025 kunmap_atomic(to);
1026 break;
1027 }
1028 case SAS_PROTOCOL_SATA:
1029 case SAS_PROTOCOL_STP:
1030 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1031 ts->stat = SAM_STAT_GOOD;
1032 hisi_sas_sata_done(task, slot);
1033 break;
1034 default:
1035 ts->stat = SAM_STAT_CHECK_CONDITION;
1036 break;
1037 }
1038
1039 if (!slot->port->port_attached) {
1040 dev_err(dev, "slot complete: port %d has removed\n",
1041 slot->port->sas_port.id);
1042 ts->stat = SAS_PHY_DOWN;
1043 }
1044
1045out:
1046 spin_lock_irqsave(&task->task_state_lock, flags);
1047 task->task_state_flags |= SAS_TASK_STATE_DONE;
1048 spin_unlock_irqrestore(&task->task_state_lock, flags);
1049 spin_lock_irqsave(&hisi_hba->lock, flags);
1050 hisi_sas_slot_task_free(hisi_hba, task, slot);
1051 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1052 sts = ts->stat;
1053
1054 if (task->task_done)
1055 task->task_done(task);
1056
1057 return sts;
1058}
1059
1060static void cq_tasklet_v3_hw(unsigned long val)
1061{
1062 struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
1063 struct hisi_hba *hisi_hba = cq->hisi_hba;
1064 struct hisi_sas_slot *slot;
1065 struct hisi_sas_itct *itct;
1066 struct hisi_sas_complete_v3_hdr *complete_queue;
1067 u32 rd_point = cq->rd_point, wr_point, dev_id;
1068 int queue = cq->id;
1069 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
1070
1071 complete_queue = hisi_hba->complete_hdr[queue];
1072
1073 spin_lock(&dq->lock);
1074 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
1075 (0x14 * queue));
1076
1077 while (rd_point != wr_point) {
1078 struct hisi_sas_complete_v3_hdr *complete_hdr;
1079 int iptt;
1080
1081 complete_hdr = &complete_queue[rd_point];
1082
1083 /* Check for NCQ completion */
1084 if (complete_hdr->act) {
1085 u32 act_tmp = complete_hdr->act;
1086 int ncq_tag_count = ffs(act_tmp);
1087
1088 dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
1089 CMPLT_HDR_DEV_ID_OFF;
1090 itct = &hisi_hba->itct[dev_id];
1091
1092 /* The NCQ tags are held in the itct header */
1093 while (ncq_tag_count) {
1094 __le64 *ncq_tag = &itct->qw4_15[0];
1095
1096 ncq_tag_count -= 1;
1097 iptt = (ncq_tag[ncq_tag_count / 5]
1098 >> (ncq_tag_count % 5) * 12) & 0xfff;
1099
1100 slot = &hisi_hba->slot_info[iptt];
1101 slot->cmplt_queue_slot = rd_point;
1102 slot->cmplt_queue = queue;
1103 slot_complete_v3_hw(hisi_hba, slot);
1104
1105 act_tmp &= ~(1 << ncq_tag_count);
1106 ncq_tag_count = ffs(act_tmp);
1107 }
1108 } else {
1109 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
1110 slot = &hisi_hba->slot_info[iptt];
1111 slot->cmplt_queue_slot = rd_point;
1112 slot->cmplt_queue = queue;
1113 slot_complete_v3_hw(hisi_hba, slot);
1114 }
1115
1116 if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
1117 rd_point = 0;
1118 }
1119
1120 /* update rd_point */
1121 cq->rd_point = rd_point;
1122 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
1123 spin_unlock(&dq->lock);
1124}
1125
1126static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
1127{
1128 struct hisi_sas_cq *cq = p;
1129 struct hisi_hba *hisi_hba = cq->hisi_hba;
1130 int queue = cq->id;
1131
1132 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
1133
1134 tasklet_schedule(&cq->tasklet);
1135
1136 return IRQ_HANDLED;
1137}
1138
54edeee1
XC
1139static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
1140{
1141 struct device *dev = hisi_hba->dev;
1142 struct pci_dev *pdev = hisi_hba->pci_dev;
1143 int vectors, rc;
60b4a5ee 1144 int i, k;
54edeee1
XC
1145 int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
1146
1147 vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
1148 max_msi, PCI_IRQ_MSI);
1149 if (vectors < max_msi) {
1150 dev_err(dev, "could not allocate all msi (%d)\n", vectors);
1151 return -ENOENT;
1152 }
1153
1154 rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
1155 int_phy_up_down_bcast_v3_hw, 0,
1156 DRV_NAME " phy", hisi_hba);
1157 if (rc) {
1158 dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
1159 rc = -ENOENT;
1160 goto free_irq_vectors;
1161 }
1162
1163 rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
1164 int_chnl_int_v3_hw, 0,
1165 DRV_NAME " channel", hisi_hba);
1166 if (rc) {
1167 dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
1168 rc = -ENOENT;
1169 goto free_phy_irq;
1170 }
1171
60b4a5ee
XC
1172 /* Init tasklets for cq only */
1173 for (i = 0; i < hisi_hba->queue_count; i++) {
1174 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1175 struct tasklet_struct *t = &cq->tasklet;
1176
1177 rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
1178 cq_interrupt_v3_hw, 0,
1179 DRV_NAME " cq", cq);
1180 if (rc) {
1181 dev_err(dev,
1182 "could not request cq%d interrupt, rc=%d\n",
1183 i, rc);
1184 rc = -ENOENT;
1185 goto free_cq_irqs;
1186 }
1187
1188 tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
1189 }
54edeee1
XC
1190
1191 return 0;
1192
60b4a5ee
XC
1193free_cq_irqs:
1194 for (k = 0; k < i; k++) {
1195 struct hisi_sas_cq *cq = &hisi_hba->cq[k];
1196
1197 free_irq(pci_irq_vector(pdev, k+16), cq);
1198 }
1199 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
54edeee1
XC
1200free_phy_irq:
1201 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
1202free_irq_vectors:
1203 pci_free_irq_vectors(pdev);
1204 return rc;
1205}
1206
c94d8ca2
XC
1207static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
1208{
1209 int rc;
1210
1211 rc = hw_init_v3_hw(hisi_hba);
1212 if (rc)
1213 return rc;
1214
54edeee1
XC
1215 rc = interrupt_init_v3_hw(hisi_hba);
1216 if (rc)
1217 return rc;
1218
c94d8ca2
XC
1219 return 0;
1220}
1221
e21fe3a5 1222static const struct hisi_sas_hw hisi_sas_v3_hw = {
c94d8ca2
XC
1223 .hw_init = hisi_sas_v3_init,
1224 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
1225 .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
3975f605 1226 .sl_notify = sl_notify_v3_hw,
a2204723
XC
1227 .prep_ssp = prep_ssp_v3_hw,
1228 .get_free_slot = get_free_slot_v3_hw,
1229 .start_delivery = start_delivery_v3_hw,
1230 .slot_complete = slot_complete_v3_hw,
3975f605 1231 .phys_init = phys_init_v3_hw,
e21fe3a5
JG
1232};
1233
1234static struct Scsi_Host *
1235hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
1236{
1237 struct Scsi_Host *shost;
1238 struct hisi_hba *hisi_hba;
1239 struct device *dev = &pdev->dev;
1240
1241 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
1242 if (!shost)
1243 goto err_out;
1244 hisi_hba = shost_priv(shost);
1245
1246 hisi_hba->hw = &hisi_sas_v3_hw;
1247 hisi_hba->pci_dev = pdev;
1248 hisi_hba->dev = dev;
1249 hisi_hba->shost = shost;
1250 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1251
1252 init_timer(&hisi_hba->timer);
1253
1254 if (hisi_sas_get_fw_info(hisi_hba) < 0)
1255 goto err_out;
1256
1257 if (hisi_sas_alloc(hisi_hba, shost)) {
1258 hisi_sas_free(hisi_hba);
1259 goto err_out;
1260 }
1261
1262 return shost;
1263err_out:
1264 dev_err(dev, "shost alloc failed\n");
1265 return NULL;
1266}
1267
92f61e3b
JG
1268static int
1269hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1270{
e21fe3a5
JG
1271 struct Scsi_Host *shost;
1272 struct hisi_hba *hisi_hba;
1273 struct device *dev = &pdev->dev;
1274 struct asd_sas_phy **arr_phy;
1275 struct asd_sas_port **arr_port;
1276 struct sas_ha_struct *sha;
1277 int rc, phy_nr, port_nr, i;
1278
1279 rc = pci_enable_device(pdev);
1280 if (rc)
1281 goto err_out;
1282
1283 pci_set_master(pdev);
1284
1285 rc = pci_request_regions(pdev, DRV_NAME);
1286 if (rc)
1287 goto err_out_disable_device;
1288
1289 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
1290 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
1291 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
1292 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
1293 dev_err(dev, "No usable DMA addressing method\n");
1294 rc = -EIO;
1295 goto err_out_regions;
1296 }
1297 }
1298
1299 shost = hisi_sas_shost_alloc_pci(pdev);
1300 if (!shost) {
1301 rc = -ENOMEM;
1302 goto err_out_regions;
1303 }
1304
1305 sha = SHOST_TO_SAS_HA(shost);
1306 hisi_hba = shost_priv(shost);
1307 dev_set_drvdata(dev, sha);
1308
1309 hisi_hba->regs = pcim_iomap(pdev, 5, 0);
1310 if (!hisi_hba->regs) {
1311 dev_err(dev, "cannot map register.\n");
1312 rc = -ENOMEM;
1313 goto err_out_ha;
1314 }
1315
1316 phy_nr = port_nr = hisi_hba->n_phy;
1317
1318 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1319 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1320 if (!arr_phy || !arr_port) {
1321 rc = -ENOMEM;
1322 goto err_out_ha;
1323 }
1324
1325 sha->sas_phy = arr_phy;
1326 sha->sas_port = arr_port;
1327 sha->core.shost = shost;
1328 sha->lldd_ha = hisi_hba;
1329
1330 shost->transportt = hisi_sas_stt;
1331 shost->max_id = HISI_SAS_MAX_DEVICES;
1332 shost->max_lun = ~0;
1333 shost->max_channel = 1;
1334 shost->max_cmd_len = 16;
1335 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1336 shost->can_queue = hisi_hba->hw->max_command_entries;
1337 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
1338
1339 sha->sas_ha_name = DRV_NAME;
1340 sha->dev = dev;
1341 sha->lldd_module = THIS_MODULE;
1342 sha->sas_addr = &hisi_hba->sas_addr[0];
1343 sha->num_phys = hisi_hba->n_phy;
1344 sha->core.shost = hisi_hba->shost;
1345
1346 for (i = 0; i < hisi_hba->n_phy; i++) {
1347 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1348 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1349 }
1350
1351 hisi_sas_init_add(hisi_hba);
1352
1353 rc = scsi_add_host(shost, dev);
1354 if (rc)
1355 goto err_out_ha;
1356
1357 rc = sas_register_ha(sha);
1358 if (rc)
1359 goto err_out_register_ha;
1360
1361 rc = hisi_hba->hw->hw_init(hisi_hba);
1362 if (rc)
1363 goto err_out_register_ha;
1364
1365 scsi_scan_host(shost);
1366
92f61e3b 1367 return 0;
e21fe3a5
JG
1368
1369err_out_register_ha:
1370 scsi_remove_host(shost);
1371err_out_ha:
1372 kfree(shost);
1373err_out_regions:
1374 pci_release_regions(pdev);
1375err_out_disable_device:
1376 pci_disable_device(pdev);
1377err_out:
1378 return rc;
92f61e3b
JG
1379}
1380
54edeee1
XC
1381static void
1382hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
1383{
60b4a5ee
XC
1384 int i;
1385
54edeee1
XC
1386 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
1387 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
60b4a5ee
XC
1388 for (i = 0; i < hisi_hba->queue_count; i++) {
1389 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1390
1391 free_irq(pci_irq_vector(pdev, i+16), cq);
1392 }
54edeee1
XC
1393 pci_free_irq_vectors(pdev);
1394}
1395
92f61e3b
JG
1396static void hisi_sas_v3_remove(struct pci_dev *pdev)
1397{
e21fe3a5
JG
1398 struct device *dev = &pdev->dev;
1399 struct sas_ha_struct *sha = dev_get_drvdata(dev);
1400 struct hisi_hba *hisi_hba = sha->lldd_ha;
1401
1402 sas_unregister_ha(sha);
1403 sas_remove_host(sha->core.shost);
1404
1405 hisi_sas_free(hisi_hba);
54edeee1 1406 hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
e21fe3a5
JG
1407 pci_release_regions(pdev);
1408 pci_disable_device(pdev);
92f61e3b
JG
1409}
1410
1411enum {
1412 /* instances of the controller */
1413 hip08,
1414};
1415
1416static const struct pci_device_id sas_v3_pci_table[] = {
1417 { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
1418 {}
1419};
1420
1421static struct pci_driver sas_v3_pci_driver = {
1422 .name = DRV_NAME,
1423 .id_table = sas_v3_pci_table,
1424 .probe = hisi_sas_v3_probe,
1425 .remove = hisi_sas_v3_remove,
1426};
1427
1428module_pci_driver(sas_v3_pci_driver);
1429
1430MODULE_VERSION(DRV_VERSION);
1431MODULE_LICENSE("GPL");
1432MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
1433MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
1434MODULE_ALIAS("platform:" DRV_NAME);