/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
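
/* The stats tables below pair an ethtool string with the offsetof() of the
 * matching counter field, so any counter can be read generically, e.g.
 * (illustrative only):
 *
 *	u64 val = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *				   g_mac_stats_string[i].offset);
 */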
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};
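
/* The three tables above are what make ethtool -S table-driven:
 * hclge_comm_get_strings() emits the names and hclge_comm_get_stats() reads
 * the counters via the stored offsets, so adding a new counter should only
 * require a new struct field plus one table entry.
 */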
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
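
/* Layout note (inferred from the arithmetic above): the first descriptor in
 * a batch still carries the command header, so only its data area holds
 * counters (one 64-bit word fewer); every following descriptor is
 * reinterpreted whole as little-endian counter words.
 */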
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt     = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
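
/* The pkt_curr_buf*_cnt fields appear to be occupancy gauges reported as
 * absolute values rather than deltas; they are zeroed here so that the
 * accumulating loop in hclge_32_bit_update_stats() stores the latest
 * snapshot instead of a running sum.
 */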
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}
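
/* Each ring is queried one descriptor at a time: the queue index is written
 * into data[0] (masked to 9 bits) and the packet count is read back from
 * data[4], for the RX and TX status commands respectively.
 */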
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	int i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	int i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}
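
/* Rough mapping used above: SSU/PPP drop counters feed rx_dropped, IGU frame
 * errors plus over/undersize and FCS errors feed rx_errors, and the MAC
 * multicast counters are summed for both directions.
 */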
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->num_req_vfs = status->vf_num / status->pf_num;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msix =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);
	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be united by 4 bytes when send to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
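
/* The config area is read in HCLGE_CFG_RD_LEN_BYTES slices: each descriptor
 * packs its byte offset and its length (in 4-byte units) into req->offset
 * before the whole batch is sent as one command.
 */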
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = 1;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
	else
		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int  hclge_assign_tqp(struct hclge_vport *vport,
			     struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced, func_id, ret;
	bool is_pf;

	func_id = vport->vport_id;
	is_pf = (vport->vport_id == 0) ? true : false;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			ret = hclge_map_tqps_to_func(hdev, func_id,
						     hdev->htqp[i].index,
						     alloced, is_pf);
			if (ret)
				return ret;

			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disable */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc,
				u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msix;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msix(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret, i;

	hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
					  sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!hdev->msix_entries)
		return -ENOMEM;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status)
		return -ENOMEM;

	for (i = 0; i < hdev->num_msi; i++) {
		hdev->msix_entries[i].entry = i;
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
	}

	hdev->num_msi_left = hdev->num_msi;
	hdev->base_msi_vector = hdev->pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 HCLGE_ROCE_VECTOR_OFFSET;

	ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
				    hdev->num_msi, hdev->num_msi);
	if (ret < 0) {
		dev_info(&hdev->pdev->dev,
			 "MSI-X vector alloc failed: %d\n", ret);
		return ret;
	}

	return 0;
}
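
/* The interrupt range is laid out with the NIC vectors first and the RoCE
 * vectors starting at HCLGE_ROCE_VECTOR_OFFSET, matching the split reported
 * by hclge_query_pf_resource().
 */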
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status)
		return -ENOMEM;

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
	if (vectors < 0) {
		dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
		return vectors;
	}
	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 HCLGE_ROCE_VECTOR_OFFSET;

	return 0;
}
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}
static int hclge_query_autoneg_result(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"autoneg result query cmd failed %d.\n", ret);
		return ret;
	}

	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);

	return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
2167 static int hclge_set_autoneg(struct hnae3_handle
*handle
, bool enable
)
2169 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2170 struct hclge_dev
*hdev
= vport
->back
;
2172 return hclge_set_autoneg_en(hdev
, enable
);
2175 static int hclge_get_autoneg(struct hnae3_handle
*handle
)
2177 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2178 struct hclge_dev
*hdev
= vport
->back
;
2180 hclge_query_autoneg_result(hdev
);
2182 return hdev
->hw
.mac
.autoneg
;
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed,
				      HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc	= true;
	hdev->enable_mta	= true;
	hdev->mta_mac_sel_type	= HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
}
static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as autoneg's result from the mac cmd
	 * when the phy doesn't exist.
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	/* update mac->autoneg. */
	ret = hclge_query_autoneg_result(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"autoneg result query failed %d\n", ret);
		return ret;
	}

	if (!mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_stats_for_all(hdev);
	hclge_service_complete(hdev);
}
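/* A note on the service loop above (a summary of the existing code, not new
 * behavior): hclge_service_timer() re-arms itself every HZ jiffies (about
 * once a second) and calls hclge_task_schedule(), which queues service_task
 * unless the device is DOWN/REMOVING or a run is already pending
 * (HCLGE_STATE_SERVICE_SCHED). hclge_service_task() then refreshes
 * speed/duplex, link state and statistics, and hclge_service_complete()
 * clears the SCHED bit so the next timer tick can queue the work again.
 */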
static void hclge_disable_sriov(struct hclge_dev *hdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(hdev->pdev)) {
		dev_warn(&hdev->pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(hdev->pdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++) {
		if (hdev->msix_entries) {
			if (vector == hdev->msix_entries[i].vector)
				return i;
		} else {
			if (vector == (hdev->base_msi_vector + i))
				return i;
		}
	}
	return -EINVAL;
}
static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int rss_hash_algo;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get rss algo fail, status = %d\n", ret);
		return ret;
	}

	req = (struct hclge_rss_config_cmd *)desc.data;
	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);

	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
		return ETH_RSS_HASH_TOP;

	return -EINVAL;
}
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
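/* Worked example for the key download above, assuming the values from
 * hclge_cmd.h (HCLGE_RSS_KEY_SIZE = 40, HCLGE_RSS_HASH_KEY_NUM = 16):
 * a 40-byte Toeplitz key is written with three descriptors carrying
 * key[0..15], key[16..31] and key[32..39], i.e. key_size is 16, 16 and
 * 40 - 16 * 2 = 8 for key_offset 0, 1 and 2 respectively.
 */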
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = hclge_get_rss_algo(hdev);

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		/* Update the shadow RSS key with user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	ret = hclge_set_rss_indir_table(hdev, indir);
	return ret;
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);

	return ret;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = req->ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = req->ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = req->ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = req->ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = req->ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = req->ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u8 rss_key[HCLGE_RSS_KEY_SIZE];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u32 *rss_indir = NULL;
	u16 rss_size = 0, roundup_size;
	const u8 *key;
	int i, ret, j;

	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	/* Get default RSS key */
	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);

	/* Initialize RSS indirect table for each vport */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;

			/* vport 0 is for PF */
			if (j != 0)
				continue;

			rss_size = vport[j].alloc_rss_size;
			rss_indir[i] = vport[j].rss_indirection_tbl[i];
		}
	}
	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		goto err;

	key = rss_key;
	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		goto err;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		goto err;

	/* Each TC has the same queue size, and the tc_size set to hardware
	 * is the log2 of the roundup power of two of rss_size; the actual
	 * queue size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

err:
	kfree(rss_indir);

	return ret;
}
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
				   struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_ADD_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_map_handle_ring_to_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
}

static int hclge_unmap_ring_from_vector(
	struct hnae3_handle *handle, int vector,
	struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int i, vector_id;
	int ret;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));

		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Unmap TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;
			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_DEL_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Unmap TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);
		return ret;
	}
	return 0;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id, ret;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* todo clear interrupt */
		/* ring enable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, true);
	}

	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* Ring disable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, false);
	}

	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EIO;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	u16 egress_port = 0;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	status = hclge_add_mac_vlan_tbl(vport, &req, NULL);

	return status;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_remove_mac_vlan_tbl(vport, &req);

	return status;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update the VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* This mac addr does not exist, can't delete it */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);

	if (!hclge_add_uc_addr(handle, new_addr)) {
		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
		return 0;
	}

	return -EIO;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
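/* Worked example for the VF bitmap split above: the bitmap covers two
 * descriptors of HCLGE_MAX_VF_BYTES (16) bytes each. vfid = 130 gives
 * vf_byte_off = 130 / 8 = 16, which is past the first descriptor, so
 * bit 130 % 8 = 2 is set in req1->vf_bitmap[16 - 16] = req1->vf_bitmap[0].
 */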
static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
				      __be16 proto, u16 vlan_id,
				      bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n",
			ret);
		return ret;
	}

	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	return 0;
}
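/* Worked example for the port-table addressing above: each command covers
 * a window of 160 VLAN ids. For vlan_id = 1000: vlan_offset_160 =
 * 1000 / 160 = 6, vlan_offset_byte = (1000 % 160) / 8 = 5, and the bit
 * value is 1 << (1000 % 8) = 0x01.
 */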
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE   0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
	struct hnae3_handle *handle;
	int ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
					 true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
					 true);
	if (ret)
		return ret;

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
		return -EINVAL;

	hdev->mps = new_mtu;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mtu);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}
		}
	}

	return 0;
err:
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client)
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance)
			client->ops->uninit_instance(&vport->nic, 0);
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->back = hdev;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
		pci_disable_msix(pdev);
		devm_kfree(&pdev->dev, hdev->msix_entries);
		hdev->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto err_hclge_dev;
	}

	hdev->flag |= HCLGE_FLAG_USE_MSIX;
	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Command queue initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	if (hdev->flag & HCLGE_FLAG_USE_MSIX)
		ret = hclge_init_msix(hdev);
	else
		ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}
	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);
err_hclge_dev:
	return ret;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);