// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#include "hclge_err.h"

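/* Each table below maps one hardware interrupt status bit to a
 * human-readable error string. The tables are terminated by an
 * all-zero sentinel entry so they can be walked without a length.
 */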
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
        { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
        { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
        { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
        { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
        { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_com_err_int[] = {
        { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
        { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
        { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
        { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
        { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
        { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow" },
        { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
        { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
        { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ncsi_err_int[] = {
        { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
        { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
        { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
        { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
        { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
        { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
        { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
        { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
        { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
        { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
        { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
        { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
        { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
        { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
        { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
        { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
        { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
        { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
        { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
        { .int_msk = BIT(27),
          .msg = "flow_director_ad_mem0_ecc_1bit_err" },
        { .int_msk = BIT(28),
          .msg = "flow_director_ad_mem1_ecc_1bit_err" },
        { .int_msk = BIT(29),
          .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
        { .int_msk = BIT(30),
          .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
        { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
        { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
        { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
        { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
        { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
        { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
        { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
        { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
        { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
        { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
        { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
        { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_mbit_err" },
        { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
        { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
        { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
        { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
        { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
        { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
        { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
        { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
        { .int_msk = BIT(27),
          .msg = "flow_director_ad_mem0_ecc_mbit_err" },
        { .int_msk = BIT(28),
          .msg = "flow_director_ad_mem1_ecc_mbit_err" },
        { .int_msk = BIT(29),
          .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
        { .int_msk = BIT(30),
          .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_pf_int[] = {
        { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
        { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
        { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
        { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
        { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
        { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
        { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
        { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
        { /* sentinel */ }
};

struct hclge_tm_sch_ecc_info {
        const char *name;
};

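/* Indexed as [module][ram]: hclge_process_tm_sch_error() derives the
 * module number from bits 23:20 and the RAM number from bits 19:16 of
 * the ECC info word reported by the firmware.
 */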
static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = {
        {
                { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" },
                { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" },
                { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" },
                { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" },
                { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" },
                { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" },
                { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" },
                { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" },
        },
        {
                { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" },
                { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" },
                { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" },
                { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" },
                { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" },
        },
        {
                { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" },
                { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" },
                { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" },
                { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" },
                { .name = "NIC_QUEUE_CTRL:QCLEN TAB" },
        },
        {
                { .name = "RAM_CFG_CTRL:CSHAP TAB" },
                { .name = "RAM_CFG_CTRL:PSHAP TAB" },
        },
        {
                { .name = "SHAPER_CTRL:PSHAP TAB" },
        },
        {
                { .name = "MSCH_CTRL" },
        },
        {
                { .name = "TOP_CTRL" },
        },
};

static const struct hclge_hw_error hclge_tm_sch_err_int[] = {
        { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" },
        { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" },
        { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" },
        { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" },
        { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" },
        { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" },
        { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" },
        { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" },
        { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" },
        { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" },
        { .int_msk = BIT(12),
          .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" },
        { .int_msk = BIT(13),
          .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" },
        { .int_msk = BIT(14),
          .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" },
        { .int_msk = BIT(15),
          .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" },
        { .int_msk = BIT(16),
          .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" },
        { .int_msk = BIT(17),
          .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" },
        { .int_msk = BIT(18),
          .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" },
        { .int_msk = BIT(19),
          .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" },
        { .int_msk = BIT(20),
          .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" },
        { .int_msk = BIT(21),
          .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" },
        { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" },
        { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" },
        { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" },
        { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" },
        { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" },
        { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" },
        { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" },
        { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" },
        { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" },
        { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = {
        { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
        { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" },
        { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
        { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" },
        { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
        { .int_msk = BIT(20), .msg = "qcn_gp3_barrel_mem_ecc_1bit_err" },
        { .int_msk = BIT(21), .msg = "qcn_gp3_barrel_mem_ecc_mbit_err" },
        { /* sentinel */ }
};

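/* Walk a sentinel-terminated error table and log every entry whose
 * interrupt mask bit is set in the reported status word.
 */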
static void hclge_log_error(struct device *dev,
                            const struct hclge_hw_error *err_list,
                            u32 err_sts)
{
        const struct hclge_hw_error *err;
        int i = 0;

        while (err_list[i].msg) {
                err = &err_list[i];
                if (!(err->int_msk & err_sts)) {
                        i++;
                        continue;
                }
                dev_warn(dev, "%s [error status=0x%x] found\n",
                         err->msg, err_sts);
                i++;
        }
}

/* hclge_cmd_query_error: read the error information
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @cmd: command opcode
 * @flag: flag for extended command structure
 * @w_num: offset for setting the read interrupt type
 * @int_type: interrupt type whose error info is to be read
 * (RAS-CE/RAS-NFE/RAS-FE etc.)
 *
 * This function queries the error info from the hw register(s) using a
 * command.
 */
static int hclge_cmd_query_error(struct hclge_dev *hdev,
                                 struct hclge_desc *desc, u32 cmd,
                                 u16 flag, u8 w_num,
                                 enum hclge_err_int_type int_type)
{
        struct device *dev = &hdev->pdev->dev;
        int num = 1;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
        if (flag) {
                desc[0].flag |= cpu_to_le16(flag);
                hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
                num = 2;
        }
        if (w_num)
                desc[0].data[w_num] = cpu_to_le32(int_type);

        ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
        if (ret)
                dev_err(dev, "query error cmd failed (%d)\n", ret);

        return ret;
}

/* hclge_cmd_clear_error: clear the error status
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @desc_src: prefilled descriptor from the previous command, for reuse
 * @cmd: command opcode
 * @flag: flag for extended command structure
 *
 * This function clears the error status in the hw register(s) using a
 * command.
 */
static int hclge_cmd_clear_error(struct hclge_dev *hdev,
                                 struct hclge_desc *desc,
                                 struct hclge_desc *desc_src,
                                 u32 cmd, u16 flag)
{
        struct device *dev = &hdev->pdev->dev;
        int num = 1;
        int ret, i;

        if (cmd) {
                hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
                if (flag) {
                        desc[0].flag |= cpu_to_le16(flag);
                        hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
                        num = 2;
                }
                if (desc_src) {
                        for (i = 0; i < 6; i++) {
                                desc[0].data[i] = desc_src[0].data[i];
                                if (flag)
                                        desc[1].data[i] = desc_src[1].data[i];
                        }
                }
        } else {
                hclge_cmd_reuse_desc(&desc[0], false);
                if (flag) {
                        desc[0].flag |= cpu_to_le16(flag);
                        hclge_cmd_reuse_desc(&desc[1], false);
                        num = 2;
                }
        }
        ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
        if (ret)
                dev_err(dev, "clear error cmd failed (%d)\n", ret);

        return ret;
}

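/* Enable or disable the COMMON block error interrupts (IMP TCM/ITCM4,
 * CMDQ NIC/RoCEE ECC, IMP read poison and TQP ECC). The second
 * descriptor always carries the interrupt enable masks.
 */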
static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc[2];
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);

        if (en) {
                /* enable COMMON error interrupts */
                desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
                desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
                                              HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
                desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
                desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
                desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
        } else {
                /* disable COMMON error interrupts */
                desc[0].data[0] = 0;
                desc[0].data[2] = 0;
                desc[0].data[3] = 0;
                desc[0].data[4] = 0;
                desc[0].data[5] = 0;
        }
        desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
        desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
                                      HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
        desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
        desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
        desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable COMMON err interrupts\n",
                        ret);

        return ret;
}

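/* NCSI error interrupts only exist on revision 0x21 and later hardware;
 * older revisions return success without touching anything.
 */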
static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc;
        int ret;

        if (hdev->pdev->revision < 0x21)
                return 0;

        /* enable/disable NCSI error interrupts */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
        else
                desc.data[0] = 0;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable NCSI error interrupts\n",
                        ret);

        return ret;
}

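/* Configure the IGU common and IGU-EGU TNL error interrupts, then the
 * dependent NCSI error interrupts, propagating the first failure.
 */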
static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc;
        int ret;

        /* enable/disable error interrupts */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
        else
                desc.data[0] = 0;
        desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(dev,
                        "failed(%d) to enable/disable IGU common interrupts\n",
                        ret);
                return ret;
        }

        hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
        else
                desc.data[0] = 0;
        desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(dev,
                        "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
                        ret);
                return ret;
        }

        ret = hclge_enable_ncsi_error(hdev, en);
        if (ret)
                dev_err(dev, "failed(%d) to enable/disable NCSI err int\n",
                        ret);

        return ret;
}

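/* Program one pair of PPP multi-PF ECC interrupt enables: CMD0 covers
 * INT0/INT1, CMD1 covers INT2/INT3. The enable masks in the second
 * descriptor are written in both the enable and disable cases.
 */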
static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
                                            bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc[2];
        int ret;

        /* enable/disable PPP error interrupts */
        hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[1], cmd, false);

        if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
                if (en) {
                        desc[0].data[0] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
                        desc[0].data[1] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
                } else {
                        desc[0].data[0] = 0;
                        desc[0].data[1] = 0;
                }
                desc[1].data[0] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
                desc[1].data[1] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
        } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
                if (en) {
                        desc[0].data[0] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
                        desc[0].data[1] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
                } else {
                        desc[0].data[0] = 0;
                        desc[0].data[1] = 0;
                }
                desc[1].data[0] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
                desc[1].data[1] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
        }

        ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable PPP error interrupts\n",
                        ret);

        return ret;
}

static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        int ret;

        ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
                                               en);
        if (ret) {
                dev_err(dev,
                        "failed(%d) to enable/disable PPP error intr 0,1\n",
                        ret);
                return ret;
        }

        ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
                                               en);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable PPP error intr 2,3\n",
                        ret);

        return ret;
}

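/* Enable or disable TM scheduler ECC interrupts, then read-modify-write
 * the TM QCN memory interrupt configuration.
 */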
int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc;
        int ret;

        /* enable TM SCH hw errors */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
        else
                desc.data[0] = 0;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
                return ret;
        }

        /* enable TM QCN hw errors */
        ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
                                    0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
                return ret;
        }

        hclge_cmd_reuse_desc(&desc, false);
        if (en)
                desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
        else
                desc.data[1] = 0;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(dev,
                        "failed(%d) to configure TM QCN mem errors\n", ret);

        return ret;
}

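/* Query, log and clear the COMMON block error status: IMP TCM, CMDQ
 * NIC/RoCEE, IMP read poison, TQP and ITCM4 ECC errors.
 */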
static void hclge_process_common_error(struct hclge_dev *hdev,
                                       enum hclge_err_int_type type)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc[2];
        u32 err_sts;
        int ret;

        /* read err sts */
        ret = hclge_cmd_query_error(hdev, &desc[0],
                                    HCLGE_COMMON_ECC_INT_CFG,
                                    HCLGE_CMD_FLAG_NEXT, 0, 0);
        if (ret) {
                dev_err(dev,
                        "failed(=%d) to query COMMON error interrupt status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);

        err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);

        err_sts = (le32_to_cpu(desc[0].data[1]) >>
                   HCLGE_CMDQ_ROC_ECC_INT_SHIFT) & HCLGE_CMDQ_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);

        if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
                dev_warn(dev, "imp_rd_data_poison_err found\n");

        err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
                  HCLGE_TQP_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);

        err_sts = (le32_to_cpu(desc[0].data[5])) &
                  HCLGE_IMP_ITCM4_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);

        /* clear error interrupts */
        desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
        desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
                                      HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
        desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
        desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);

        ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
                                    HCLGE_CMD_FLAG_NEXT);
        if (ret)
                dev_err(dev,
                        "failed(%d) to clear COMMON error interrupt status\n",
                        ret);
}

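/* Query, log and clear NCSI ECC errors (revision 0x21 and later only),
 * reusing the read descriptor for the clear command.
 */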
static void hclge_process_ncsi_error(struct hclge_dev *hdev,
                                     enum hclge_err_int_type type)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc_rd;
        struct hclge_desc desc_wr;
        u32 err_sts;
        int ret;

        if (hdev->pdev->revision < 0x21)
                return;

        /* read NCSI error status */
        ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
                                    0, 1, HCLGE_NCSI_ERR_INT_TYPE);
        if (ret) {
                dev_err(dev,
                        "failed(=%d) to query NCSI error interrupt status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = le32_to_cpu(desc_rd.data[0]);
        hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);

        /* clear err int */
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_NCSI_INT_CLR, 0);
        if (ret)
                dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
                        ret);
}

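/* Handle IGU common and IGU-EGU TNL errors in sequence, then chain into
 * NCSI error processing.
 */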
static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
                                        enum hclge_err_int_type int_type)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc_rd;
        struct hclge_desc desc_wr;
        u32 err_sts;
        int ret;

        /* read IGU common err sts */
        ret = hclge_cmd_query_error(hdev, &desc_rd,
                                    HCLGE_IGU_COMMON_INT_QUERY,
                                    0, 1, int_type);
        if (ret) {
                dev_err(dev, "failed(=%d) to query IGU common int status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = le32_to_cpu(desc_rd.data[0]) &
                  HCLGE_IGU_COM_INT_MASK;
        hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);

        /* clear err int */
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_IGU_COMMON_INT_CLR, 0);
        if (ret) {
                dev_err(dev, "failed(=%d) to clear IGU common int status\n",
                        ret);
                return;
        }

        /* read IGU-EGU TNL err sts */
        ret = hclge_cmd_query_error(hdev, &desc_rd,
                                    HCLGE_IGU_EGU_TNL_INT_QUERY,
                                    0, 1, int_type);
        if (ret) {
                dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = le32_to_cpu(desc_rd.data[0]) &
                  HCLGE_IGU_EGU_TNL_INT_MASK;
        hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);

        /* clear err int */
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_IGU_EGU_TNL_INT_CLR, 0);
        if (ret) {
                dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
                        ret);
                return;
        }

        hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
}

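/* Query the PPP interrupt status for the given command, log the
 * multi-PF (and, for CMD0, per-PF) errors and clear them. A function
 * reset level is recorded in reset_level whenever an error is seen,
 * though it is not yet consumed in this version. Returns 0 on success,
 * -EIO/-EINVAL on failure.
 */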
static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
                                         enum hclge_err_int_type int_type)
{
        enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
        struct device *dev = &hdev->pdev->dev;
        const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2;
        /* only assigned and used for CMD0; NULL-initialized so static
         * analysis does not warn about an uninitialized symbol
         */
        const struct hclge_hw_error *hw_err_lst3 = NULL;
        struct hclge_desc desc[2];
        u32 err_sts;
        int ret;

        /* read PPP INT sts */
        ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
                                    HCLGE_CMD_FLAG_NEXT, 5, int_type);
        if (ret) {
                dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
                        ret);
                return -EIO;
        }

        /* log error */
        if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
                hw_err_lst1 = &hclge_ppp_mpf_int0[0];
                hw_err_lst2 = &hclge_ppp_mpf_int1[0];
                hw_err_lst3 = &hclge_ppp_pf_int[0];
        } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
                hw_err_lst1 = &hclge_ppp_mpf_int2[0];
                hw_err_lst2 = &hclge_ppp_mpf_int3[0];
        } else {
                dev_err(dev, "invalid command(=%d)\n", cmd);
                return -EINVAL;
        }

        err_sts = le32_to_cpu(desc[0].data[2]);
        if (err_sts) {
                hclge_log_error(dev, hw_err_lst1, err_sts);
                reset_level = HNAE3_FUNC_RESET;
        }

        err_sts = le32_to_cpu(desc[0].data[3]);
        if (err_sts) {
                hclge_log_error(dev, hw_err_lst2, err_sts);
                reset_level = HNAE3_FUNC_RESET;
        }

        if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
                err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
                if (err_sts) {
                        hclge_log_error(dev, hw_err_lst3, err_sts);
                        reset_level = HNAE3_FUNC_RESET;
                }
        }

        /* clear PPP INT */
        ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
                                    HCLGE_CMD_FLAG_NEXT);
        if (ret) {
                dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
                        ret);
                return -EIO;
        }

        return 0;
}

static void hclge_process_ppp_error(struct hclge_dev *hdev,
                                    enum hclge_err_int_type int_type)
{
        struct device *dev = &hdev->pdev->dev;
        int ret;

        /* read PPP INT0,1 sts */
        ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
                                            int_type);
        if (ret < 0) {
                dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
                        ret);
                return;
        }

        /* read err PPP INT2,3 sts */
        ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
                                            int_type);
        if (ret < 0)
                dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
                        ret);
}

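/* Query, log and clear TM scheduler errors. On a multi-bit ECC error
 * (bit 1) the failing module/RAM is decoded from the previously read
 * ECC info word; the CE, NFE and FE interrupt sources are then each
 * drained with a read-then-clear sequence.
 */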
static void hclge_process_tm_sch_error(struct hclge_dev *hdev)
{
        struct device *dev = &hdev->pdev->dev;
        const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info;
        struct hclge_desc desc;
        u32 ecc_info;
        u8 module_no;
        u8 ram_no;
        int ret;

        /* read TM scheduler errors */
        ret = hclge_cmd_query_error(hdev, &desc,
                                    HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n",
                        ret);
                return;
        }
        ecc_info = le32_to_cpu(desc.data[0]);

        ret = hclge_cmd_query_error(hdev, &desc,
                                    HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret);
                return;
        }

        /* log TM scheduler errors */
        if (le32_to_cpu(desc.data[0])) {
                hclge_log_error(dev, &hclge_tm_sch_err_int[0],
                                le32_to_cpu(desc.data[0]));
                if (le32_to_cpu(desc.data[0]) & 0x2) {
                        module_no = (ecc_info >> 20) & 0xF;
                        ram_no = (ecc_info >> 16) & 0xF;
                        tm_sch_ecc_info =
                                &hclge_tm_sch_ecc_err[module_no][ram_no];
                        dev_warn(dev, "ecc err module:ram=%s\n",
                                 tm_sch_ecc_info->name);
                        dev_warn(dev, "ecc memory address = 0x%x\n",
                                 ecc_info & 0xFFFF);
                }
        }

        /* clear TM scheduler errors */
        ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret);
                return;
        }

        ret = hclge_cmd_query_error(hdev, &desc,
                                    HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read SCH CE status\n", ret);
                return;
        }

        ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret);
                return;
        }

        ret = hclge_cmd_query_error(hdev, &desc,
                                    HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read SCH NFE status\n", ret);
                return;
        }

        ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret);
                return;
        }

        ret = hclge_cmd_query_error(hdev, &desc,
                                    HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
                return;
        }

        ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
        if (ret)
                dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
}

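/* Query, log and clear TM QCN memory ECC errors. */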
static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc;
        int ret;

        /* read QCN errors */
        ret = hclge_cmd_query_error(hdev, &desc,
                                    HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
        if (ret) {
                dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
                return;
        }

        /* log QCN errors */
        if (le32_to_cpu(desc.data[0]))
                hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
                                le32_to_cpu(desc.data[0]));

        /* clear QCN errors */
        ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
        if (ret)
                dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
}

static void hclge_process_tm_error(struct hclge_dev *hdev,
                                   enum hclge_err_int_type type)
{
        hclge_process_tm_sch_error(hdev);
        hclge_process_tm_qcn_error(hdev);
}

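/* Per-block dispatch table: .msk is the block's bit in the RAS status
 * register, .enable_error configures its interrupts and .process_error
 * handles a reported error. Sentinel-terminated.
 */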
static const struct hclge_hw_blk hw_blk[] = {
        { .msk = BIT(0), .name = "IGU_EGU",
          .enable_error = hclge_enable_igu_egu_error,
          .process_error = hclge_process_igu_egu_error, },
        { .msk = BIT(5), .name = "COMMON",
          .enable_error = hclge_enable_common_error,
          .process_error = hclge_process_common_error, },
        { .msk = BIT(4), .name = "TM",
          .enable_error = hclge_enable_tm_hw_error,
          .process_error = hclge_process_tm_error, },
        { .msk = BIT(1), .name = "PPP",
          .enable_error = hclge_enable_ppp_error,
          .process_error = hclge_process_ppp_error, },
        { /* sentinel */ }
};

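/* Enable or disable error interrupt reporting for every block in
 * hw_blk[], stopping at the first failure.
 */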
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
{
        struct device *dev = &hdev->pdev->dev;
        int ret = 0;
        int i = 0;

        while (hw_blk[i].name) {
                if (!hw_blk[i].enable_error) {
                        i++;
                        continue;
                }
                ret = hw_blk[i].enable_error(hdev, state);
                if (ret) {
                        dev_err(dev, "failed(%d) to enable/disable err int\n",
                                ret);
                        return ret;
                }
                i++;
        }

        return ret;
}

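/* PCI error-recovery entry point: read the RAS status register and run
 * the process_error handler of every block flagged in the non-fatal
 * error field. A reset is always requested afterwards.
 */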
pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct device *dev = &hdev->pdev->dev;
        u32 sts, val;
        int i = 0;

        sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);

        /* Processing Non-fatal errors */
        if (sts & HCLGE_RAS_REG_NFE_MASK) {
                val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
                i = 0;
                while (hw_blk[i].name) {
                        if (!(hw_blk[i].msk & val)) {
                                i++;
                                continue;
                        }
                        dev_warn(dev, "%s ras non-fatal error identified\n",
                                 hw_blk[i].name);
                        if (hw_blk[i].process_error)
                                hw_blk[i].process_error(hdev,
                                                        HCLGE_ERR_INT_RAS_NFE);
                        i++;
                }
        }

        return PCI_ERS_RESULT_NEED_RESET;
}