/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

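/* Statistics are fetched as a chain of command descriptors: the first
 * descriptor still carries the command header, so only its data[] words
 * hold counters, while every following descriptor is reinterpreted as
 * raw counter words in its entirety. Counters are accumulated into the
 * software copy rather than overwritten.
 */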
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

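/* The "curr buf" counters below are point-in-time gauges rather than
 * free-running counters, so they are zeroed before each refresh instead
 * of being accumulated like the other fields.
 */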
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt     = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

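/* Query the per-queue RX and TX packet counters one TQP at a time.
 * Judging by the field names, word 4 of each completion holds the ring
 * packet count, which is accumulated into the per-TQP software stats.
 */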
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}

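/* Build the ethtool string table for the per-queue counters. The order
 * (all TX queues first, then all RX queues) must match the order used
 * by hclge_tqps_get_stats() above.
 */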
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

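/* Fold the hardware counter groups into the generic
 * struct net_device_stats view exposed to the stack.
 */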
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

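/* Refresh all counter groups (MAC, 32-bit, 64-bit and per-TQP) for one
 * handle. Failures are only logged; stale counters are preferred over
 * failing the stats request.
 */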
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

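/* Poll the firmware for function status. The PF state is re-read up to
 * five times, sleeping 1-2 ms between attempts, to give a PF reset time
 * to complete before the result is parsed.
 */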
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

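/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value.
 * The numeric codes mirror the HCLGE_CFG_SPEED_S encoding that
 * hclge_cfg_mac_speed_dup() writes in the other direction.
 */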
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = 1;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TC maps are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
	else
		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

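/* Size the kNIC private info for a vport: rss_size is bounded by both
 * the hardware maximum and an even split of the requested TQPs across
 * TCs, so num_tqps ends up as rss_size * num_tc.
 */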
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

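/* Program the per-TC TX packet buffer sizes. Sizes are passed to the
 * firmware in 128-byte units, with bit 15 set as an update-enable flag
 * for each TC entry.
 */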
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

1452 static int hclge_get_pfc_priv_num(struct hclge_dev
*hdev
,
1453 struct hclge_pkt_buf_alloc
*buf_alloc
)
1455 struct hclge_priv_buf
*priv
;
1458 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1459 priv
= &buf_alloc
->priv_buf
[i
];
1460 if ((hdev
->tm_info
.hw_pfc_map
& BIT(i
)) &&
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

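/* Check whether the remaining packet buffer can hold the shared buffer
 * left over after the private RX buffers. The minimum shared size is
 * derived from the MPS plus a delay-variation allowance, and from a
 * per-TC estimate that charges a full MPS for PFC-enabled TCs and half
 * an MPS for the rest. On success the shared buffer watermarks and
 * per-TC thresholds are filled in as a side effect.
 */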
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}

*hdev
,
1805 struct hclge_pkt_buf_alloc
*buf_alloc
)
1807 struct hclge_shared_buf
*s_buf
= &buf_alloc
->s_buf
;
1808 struct hclge_rx_com_thrd
*req
;
1809 struct hclge_desc desc
[2];
1810 struct hclge_tc_thrd
*tc
;
1814 for (i
= 0; i
< 2; i
++) {
1815 hclge_cmd_setup_basic_desc(&desc
[i
],
1816 HCLGE_OPC_RX_COM_THRD_ALLOC
, false);
1817 req
= (struct hclge_rx_com_thrd
*)&desc
[i
].data
;
1819 /* The first descriptor set the NEXT bit to 1 */
1821 desc
[i
].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1823 desc
[i
].flag
&= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1825 for (j
= 0; j
< HCLGE_TC_NUM_ONE_DESC
; j
++) {
1826 tc
= &s_buf
->tc_thrd
[i
* HCLGE_TC_NUM_ONE_DESC
+ j
];
1828 req
->com_thrd
[j
].high
=
1829 cpu_to_le16(tc
->high
>> HCLGE_BUF_UNIT_S
);
1830 req
->com_thrd
[j
].high
|=
1831 cpu_to_le16(HCLGE_PRIV_ENABLE(tc
->high
) <<
1832 HCLGE_RX_PRIV_EN_B
);
1833 req
->com_thrd
[j
].low
=
1834 cpu_to_le16(tc
->low
>> HCLGE_BUF_UNIT_S
);
1835 req
->com_thrd
[j
].low
|=
1836 cpu_to_le16(HCLGE_PRIV_ENABLE(tc
->low
) <<
1837 HCLGE_RX_PRIV_EN_B
);
1841 /* Send 2 descriptors at one time */
1842 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
1844 dev_err(&hdev
->pdev
->dev
,
1845 "common threshold config cmd failed %d\n", ret
);
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

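/* Top-level packet buffer setup: calculate and program the TX buffers,
 * then the RX private buffers, and finally (on DCB-capable hardware)
 * the RX waterlines and common thresholds, ending with the common
 * waterline. The scratch hclge_pkt_buf_alloc is freed on all paths.
 */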
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				HCLGE_ROCE_VECTOR_OFFSET;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

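/* Half duplex is only meaningful for 10M/100M links; at gigabit speeds
 * and above the MAC is forced to full duplex.
 */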
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}

static int hclge_query_autoneg_result(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"autoneg result query cmd failed %d.\n", ret);
		return ret;
	}

	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);

	return 0;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

2180 static int hclge_set_autoneg(struct hnae3_handle
*handle
, bool enable
)
2182 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2183 struct hclge_dev
*hdev
= vport
->back
;
2185 return hclge_set_autoneg_en(hdev
, enable
);
2188 static int hclge_get_autoneg(struct hnae3_handle
*handle
)
2190 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2191 struct hclge_dev
*hdev
= vport
->back
;
2193 hclge_query_autoneg_result(hdev
);
2195 return hdev
->hw
.mac
.autoneg
;
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed,
				      HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc = true;
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
}
static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;

	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* Get the speed and duplex as the autoneg result from the MAC
	 * command only when no PHY is attached.
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
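
/* The misc interrupt is shared by reset detection and other events. The
 * handler below masks the vector and defers all work to the service task;
 * the vector is re-enabled in hclge_misc_irq_service_task() once the
 * pending event has been handled.
 */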
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;

	hclge_enable_vector(&hdev->misc_vector, false);
	if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	ret = devm_request_irq(&hdev->pdev->dev,
			       hdev->misc_vector.vector_irq,
			       hclge_misc_irq_handle, 0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}
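
/* Poll the hardware until the requested reset has completed. With
 * HCLGE_RESET_WAIT_CNT polls of HCLGE_RESET_WATI_MS each, this waits
 * roughly half a second (5 x 100 ms) before giving up, and always
 * clears the reset status register afterwards so the same event is
 * not detected twice.
 */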
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	/* The reset status register must be cleared to prevent the driver
	 * from detecting the same reset interrupt again.
	 */
	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", type);
		break;
	}
}

static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	u32 rst_reg_val;

	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
		rst_level = HNAE3_GLOBAL_RESET;
	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
		rst_level = HNAE3_CORE_RESET;
	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
		rst_level = HNAE3_IMP_RESET;

	return rst_level;
}
static void hclge_reset_event(struct hnae3_handle *handle,
			      enum hnae3_reset_type reset)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	dev_info(&hdev->pdev->dev,
		 "Receive reset event, reset type is %d\n", reset);

	switch (reset) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
			dev_err(&hdev->pdev->dev, "Already in reset state\n");
			return;
		}
		hdev->reset_type = reset;
		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->service_task);
		break;
	default:
		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d\n",
			 reset);
		break;
	}
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	bool do_reset;

	do_reset = hdev->reset_type != HNAE3_NONE_RESET;

	/* Reset is detected by interrupt */
	if (hdev->reset_type == HNAE3_NONE_RESET)
		hdev->reset_type = hclge_detected_reset_event(hdev);

	if (hdev->reset_type == HNAE3_NONE_RESET)
		return;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

		if (do_reset)
			hclge_do_reset(hdev, hdev->reset_type);
		else
			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);

		if (!hclge_reset_wait(hdev)) {
			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
			hclge_reset_ae_dev(hdev->ae_dev);
			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
		}
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		break;
	default:
		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
			hdev->reset_type);
		break;
	}

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
{
	hclge_reset_subtask(hdev);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	hclge_misc_irq_service_task(hdev);
	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_stats_for_all(hdev);
	hclge_service_complete(hdev);
}

static void hclge_disable_sriov(struct hclge_dev *hdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(hdev->pdev)) {
		dev_warn(&hdev->pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(hdev->pdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
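
/* Vector 0 is reserved for the misc interrupt, so ring vectors are
 * allocated starting at index 1. The doorbell address of vector i is
 * io_base + HCLGE_VECTOR_REG_BASE + (i - 1) * HCLGE_VECTOR_REG_OFFSET,
 * plus a per-vport offset of vport_id * HCLGE_VECTOR_VF_OFFSET.
 */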
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	u32 rss_hash_algo;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get rss algo config fail, status =%d\n", ret);
		return ret;
	}

	req = (struct hclge_rss_config_cmd *)desc.data;
	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);

	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
		return ETH_RSS_HASH_TOP;

	return -EINVAL;
}
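
/* The RSS hash key is longer than a single command descriptor can carry,
 * so it is written in three chunks: two full chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes and a final chunk holding the remaining
 * HCLGE_RSS_KEY_SIZE - 2 * HCLGE_RSS_HASH_KEY_NUM bytes.
 */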
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = hclge_get_rss_algo(hdev);

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	ret = hclge_set_rss_indir_table(hdev, indir);
	return ret;
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
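
/* Changing the tuple for one flow type must not disturb the others, so
 * the current input-tuple configuration is read first and then the same
 * descriptor is resent as a write via hclge_cmd_reuse_desc() with only
 * the requested field modified.
 */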
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);

	return ret;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = req->ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = req->ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = req->ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = req->ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = req->ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = req->ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
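
/* Every TC shares one queue size. The tc_size written to hardware is the
 * log2 of rss_size rounded up to a power of two; for example, an
 * alloc_rss_size of 16 gives roundup_pow_of_two(16) = 16 and
 * ilog2(16) = 4. The actual queue usage is bounded by the indirection
 * table contents.
 */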
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u8 rss_key[HCLGE_RSS_KEY_SIZE];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u32 *rss_indir = NULL;
	u16 rss_size = 0, roundup_size;
	const u8 *key;
	int i, ret, j;

	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	/* Get default RSS key */
	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);

	/* Initialize RSS indirect table for each vport */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;

			/* vport 0 is for PF */
			if (j != 0)
				continue;

			rss_size = vport[j].alloc_rss_size;
			rss_indir[i] = vport[j].rss_indirection_tbl[i];
		}
	}
	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		goto err;

	key = rss_key;
	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		goto err;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		goto err;

	/* Each TC has the same queue size, and the tc_size set to hardware
	 * is the log2 of the roundup power of two of rss_size; the actual
	 * queue size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

err:
	kfree(rss_indir);

	return ret;
}
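
/* A single HCLGE_OPC_ADD_RING_TO_VECTOR descriptor carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long ring chain is
 * flushed to hardware in batches and the descriptor is re-initialized
 * between batches.
 */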
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
				   struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int ret;
	int i = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_ADD_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_map_handle_ring_to_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
}

static int hclge_unmap_ring_from_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int i = 0;
	int vector_id;
	int ret;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));

		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Unmap TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_DEL_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Unmap TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
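
/* MAC loopback is a read-modify-write sequence: read the current MAC mode
 * config, flip HCLGE_MAC_APP_LP_B in the returned word, then resend the
 * same descriptor as a write so that every other MAC mode bit keeps its
 * original value.
 */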
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
		/* 1 Read out the MAC mode config at first */
		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_CONFIG_MAC_MODE,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac loopback get fail, ret =%d.\n",
				ret);
			return ret;
		}

		/* 2 Then setup the loopback flag */
		loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
		if (en)
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
		else
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);

		req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

		/* 3 Config mac work mode with loopback flag
		 * and its original configure parameters
		 */
		hclge_cmd_reuse_desc(&desc, false);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"mac loopback set fail, ret =%d.\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id, ret;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* todo clear interrupt */
		/* ring enable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, true);
	}
	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* Ring disable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, false);
	}
	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EIO;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
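
/* A MAC/VLAN table entry tracks up to 256 functions as a bitmap spanning
 * two extra descriptors: vfid 0-191 live in desc[1].data and vfid
 * 192-255 in desc[2].data, one bit per function.
 */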
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
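
/* Pack the 6-byte MAC address into the table entry layout: bytes 0-3 form
 * the 32-bit high word (byte 3 in the most significant position) and
 * bytes 4-5 form the 16-bit low word, both stored little endian.
 */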
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
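
/* The MTA (multicast table array) index is a 12-bit value taken from the
 * top bits of the destination MAC address; mta_mac_sel_type selects which
 * bit window is used (HCLGE_MAC_ADDR_47_36 here, set in hclge_mac_init()).
 */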
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	u16 egress_port = 0;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	status = hclge_add_mac_vlan_tbl(vport, &req, NULL);

	return status;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_remove_mac_vlan_tbl(vport, &req);

	return status;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update the VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, so it can't be deleted */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);

	if (!hclge_add_uc_addr(handle, new_addr)) {
		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
		return 0;
	}

	return -EIO;
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
				      __be16 proto, u16 vlan_id,
				      bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n",
			ret);
		return ret;
	}

	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	return 0;
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE   0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
	struct hnae3_handle *handle;
	int ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
					 true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
					 true);
	if (ret)
		return ret;

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
		return -EINVAL;

	hdev->mps = new_mtu;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mtu);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
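
/* Resetting a TQP is a four-step sequence: disable the queue, assert the
 * per-queue soft reset, poll the ready_to_reset status up to
 * HCLGE_TQP_RESET_TRY_TIMES, and finally deassert the reset.
 */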
static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);	/* poll interval; assumed value */
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
4387 static u32
hclge_get_fw_version(struct hnae3_handle
*handle
)
4389 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4390 struct hclge_dev
*hdev
= vport
->back
;
4392 return hdev
->fw_version
;
4395 static void hclge_get_pauseparam(struct hnae3_handle
*handle
, u32
*auto_neg
,
4396 u32
*rx_en
, u32
*tx_en
)
4398 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4399 struct hclge_dev
*hdev
= vport
->back
;
4401 *auto_neg
= hclge_get_autoneg(handle
);
4403 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_PFC
) {
4409 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_RX_PAUSE
) {
4412 } else if (hdev
->tm_info
.fc_mode
== HCLGE_FC_TX_PAUSE
) {
4415 } else if (hdev
->tm_info
.fc_mode
== HCLGE_FC_FULL
) {
4424 static void hclge_get_ksettings_an_result(struct hnae3_handle
*handle
,
4425 u8
*auto_neg
, u32
*speed
, u8
*duplex
)
4427 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4428 struct hclge_dev
*hdev
= vport
->back
;
4431 *speed
= hdev
->hw
.mac
.speed
;
4433 *duplex
= hdev
->hw
.mac
.duplex
;
4435 *auto_neg
= hdev
->hw
.mac
.autoneg
;
4438 static void hclge_get_media_type(struct hnae3_handle
*handle
, u8
*media_type
)
4440 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4441 struct hclge_dev
*hdev
= vport
->back
;
4444 *media_type
= hdev
->hw
.mac
.media_type
;
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
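
/* Bind a client (KNIC/UNIC netdev or RoCE) to every vport. A RoCE
 * instance is only initialized once both a nic client and a roce client
 * are registered and the hardware supports RoCE.
 */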
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}
		}
	}

	return 0;
err:
	return ret;
}
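
/* Unbind mirrors hclge_init_client_instance(): the roce instance is torn
 * down first, and the nic instance is kept when only the RoCE client is
 * being unregistered.
 */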
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
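
/* Bring up the PCI function: enable the device, prefer a 64-bit DMA mask
 * with a 32-bit fallback, claim the regions, and map BAR2, which carries
 * the command/configuration register space.
 */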
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->back = hdev;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
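
/* Top-level ae_dev bring-up: firmware command queue and handshake first,
 * then capability query, MSI/MSI-X and misc-vector setup, TQP/vport
 * allocation and mapping, MAC/MDIO, buffers, TSO, VLAN, TM scheduler and
 * RSS, and finally the service timer/task plus the misc interrupt.
 */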
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto err_hclge_dev;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		return ret;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);
err_hclge_dev:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
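
/* Re-initialization after a reset: replays the hardware-facing steps of
 * hclge_init_ae_dev() (command queue, capabilities, MAC, buffers, TSO,
 * VLAN, scheduler, RSS) without re-allocating software state such as
 * vports, interrupt vectors or the service task.
 */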
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	hclge_free_vector(hdev, 0);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}
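
/* Everything the hnae3 framework needs from this PF driver is dispatched
 * through the ops table below.
 */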
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.reset_event = hclge_reset_event,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};
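
/* Module init/exit only (un)register the algorithm with the hnae3
 * framework; actual device probing happens when the hnae3 core matches
 * a PCI device against ae_algo_pci_tbl.
 */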
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);