/* Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_update_led_status(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
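
/* Note on the statistics query commands below: the firmware returns the
 * counters in a chain of command descriptors. The parsing loops appear to
 * treat descriptor 0 specially because its leading data words carry command
 * header fields rather than counters, so fewer data words are read from it
 * than from the following descriptors.
 */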
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
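
/* The pkt_curr_buf counters report current buffer occupancy rather than an
 * accumulated count, so they are cleared here before
 * hclge_32_bit_update_stats() adds the freshly read values back in.
 */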
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
{
	struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
	struct hclge_desc desc;
	__le64 *desc_data;
	int ret;

	/* for fiber port, need to query the total rx/tx packet statistics,
	 * used for data transfer checking.
	 */
	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return 0;

	if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC total pkt stats fail, ret = %d\n", ret);
		return ret;
	}

	desc_data = (__le64 *)(&desc.data[0]);
	mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
	mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);

	return 0;
}
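
/* Read the full MAC statistics block. As with the other stats commands,
 * descriptor 0 is parsed with two fewer 64-bit data words, which appears to
 * account for header words occupying the start of the first descriptor.
 */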
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
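
/* Per-queue counters are fetched one queue at a time: the queue index,
 * masked with 0x1ff, is written into the first data word of the query
 * descriptor, and the counter is returned in the second data word.
 */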
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
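
/* Map the hardware counters onto the standard net_device_stats fields:
 * rx_dropped aggregates the SSU/PPP drop counters, while rx_errors sums the
 * oversize, undersize, missing-SOF/EOF and FCS error counters.
 */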
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
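
/* The MAC address arrives split across the config parameters: param[2]
 * holds the low 32 bits and param[3] holds the high 16 bits. The two-step
 * "(high << 31) << 1" used below is equivalent to a single 32-bit shift and
 * reassembles the 48-bit address before it is copied out byte by byte.
 */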
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is given to hardware in units of 4 bytes */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Discontiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
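
/* One vport is allocated per VMDq instance and per requested VF, plus one
 * for the PF's own NIC. TQPs are divided evenly between the vports, and the
 * main (PF) vport additionally absorbs the remainder of the division.
 */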
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);

	return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
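
/* Check whether the remaining packet buffer can hold the shared buffer.
 * The required shared size is the larger of a fixed floor (2 * MPS plus a
 * delay value) and a per-TC estimate (one MPS per PFC-enabled TC, half an
 * MPS per other TC, plus one extra MPS). On success the shared buffer size
 * and its per-TC thresholds are filled in.
 */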
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
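
/* Watermarks and thresholds are programmed in HCLGE_BUF_UNIT_S units with a
 * per-field enable bit. Each of the two chained descriptors below carries
 * the values for HCLGE_TC_NUM_ONE_DESC TCs; the first descriptor sets the
 * NEXT flag to link them into a single command.
 */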
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
2064 static int hclge_init_roce_base_info(struct hclge_vport
*vport
)
2066 struct hnae3_handle
*roce
= &vport
->roce
;
2067 struct hnae3_handle
*nic
= &vport
->nic
;
2069 roce
->rinfo
.num_vectors
= vport
->back
->num_roce_msi
;
2071 if (vport
->back
->num_msi_left
< vport
->roce
.rinfo
.num_vectors
||
2072 vport
->back
->num_msi_left
== 0)
2075 roce
->rinfo
.base_vector
= vport
->back
->roce_base_vector
;
2077 roce
->rinfo
.netdev
= nic
->kinfo
.netdev
;
2078 roce
->rinfo
.roce_io_base
= vport
->back
->hw
.io_base
;
2080 roce
->pdev
= nic
->pdev
;
2081 roce
->ae_algo
= nic
->ae_algo
;
2082 roce
->numa_node_mask
= nic
->numa_node_mask
;
2087 static int hclge_init_msi(struct hclge_dev
*hdev
)
2089 struct pci_dev
*pdev
= hdev
->pdev
;
2093 vectors
= pci_alloc_irq_vectors(pdev
, 1, hdev
->num_msi
,
2094 PCI_IRQ_MSI
| PCI_IRQ_MSIX
);
2097 "failed(%d) to allocate MSI/MSI-X vectors\n",
2101 if (vectors
< hdev
->num_msi
)
2102 dev_warn(&hdev
->pdev
->dev
,
2103 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2104 hdev
->num_msi
, vectors
);
2106 hdev
->num_msi
= vectors
;
2107 hdev
->num_msi_left
= vectors
;
2108 hdev
->base_msi_vector
= pdev
->irq
;
2109 hdev
->roce_base_vector
= hdev
->base_msi_vector
+
2110 HCLGE_ROCE_VECTOR_OFFSET
;
2112 hdev
->vector_status
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
2113 sizeof(u16
), GFP_KERNEL
);
2114 if (!hdev
->vector_status
) {
2115 pci_free_irq_vectors(pdev
);
2119 for (i
= 0; i
< hdev
->num_msi
; i
++)
2120 hdev
->vector_status
[i
] = HCLGE_INVALID_VPORT
;
2122 hdev
->vector_irq
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
2123 sizeof(int), GFP_KERNEL
);
2124 if (!hdev
->vector_irq
) {
2125 pci_free_irq_vectors(pdev
);
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}
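/* Example of the rule above: a 100M half-duplex request keeps the caller's
 * duplex value, while a 25G request always records HCLGE_MAC_FULL, because
 * only 10M/100M links can run half duplex on this MAC.
 */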
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	/* HW speed field encoding, as programmed below:
	 * 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7
	 */
	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
					   bool mask_vlan,
					   u8 *mac_mask)
{
	struct hclge_mac_vlan_mask_entry_cmd *req;
	struct hclge_desc desc;
	int status;

	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);

	hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
		     mask_vlan ? 1 : 0);
	ether_addr_copy(req->mac_mask, mac_mask);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
			status);

	return status;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct net_device *netdev = handle->kinfo.netdev;
	struct hclge_mac *mac = &hdev->hw.mac;
	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	int mtu;
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc = true;
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set mta filter mode fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set default mac_vlan_mask fail ret=%d\n", ret);
		return ret;
	}

	if (netdev)
		mtu = netdev->mtu;
	else
		mtu = ETH_DATA_LEN;

	ret = hclge_set_mtu(handle, mtu);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set mtu failed ret=%d\n", ret);
		return ret;
	}

	return 0;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as autoneg result from the mac cmd when
	 * there is no PHY attached
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}
static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	}
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_dbg(&hdev->pdev->dev,
			"received unknown or unhandled event of vector0\n");
		break;
	}

	/* we should clear the source of interrupt */
	hclge_clear_event_cause(hdev, event_cause, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}
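/* Example: if both HNAE3_FUNC_RESET and HNAE3_GLOBAL_RESET are pending in
 * *addr, this returns HNAE3_GLOBAL_RESET and clears every pending bit, so
 * the lower-priority function reset is absorbed by the global one.
 */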
static void hclge_reset(struct hclge_dev *hdev)
{
	/* perform reset of the stack & ae device for a client */

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
static void hclge_reset_event(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* check if this is a new reset request and we are not here just
	 * because last reset attempt did not succeed and watchdog hit us
	 * again. We will know this if last reset request did not occur very
	 * recently (watchdog timer = 5*HZ, let us check after sufficiently
	 * large time, say 4*5*HZ). In case of new request we reset the
	 * "reset level" to PF reset.
	 */
	if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
		handle->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 handle->reset_level);

	/* request reset & schedule reset task */
	set_bit(handle->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (handle->reset_level < HNAE3_GLOBAL_RESET)
		handle->reset_level++;

	handle->last_reset_time = jiffies;
}
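/* Escalation example (assuming the reset enum orders FUNC < CORE < GLOBAL,
 * as the increment above implies): a first event requests HNAE3_FUNC_RESET;
 * if another event follows within the 4 * 5 * HZ window, the level has
 * already been bumped, so the retry asks for HNAE3_CORE_RESET, and a third
 * one for HNAE3_GLOBAL_RESET.
 */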
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully resetted then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	/* The total rx/tx packet statistics are wanted to be updated
	 * per second. Both hclge_update_stats_for_all() and
	 * hclge_mac_get_traffic_stats() can do it.
	 */
	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	} else {
		hclge_mac_get_traffic_stats(hdev);
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_led_status(hdev);
	hclge_service_complete(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		/* vector 0 is reserved for the misc interrupt, so start at 1 */
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = vport->rss_algo;

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with user specified qids */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
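/* Mapping example: RXH_IP_SRC | RXH_IP_DST alone yields
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; adding RXH_L4_B_0_1 and RXH_L4_B_2_3
 * also sets the source/destination port bits, and SCTP flows always get
 * HCLGE_V_TAG_BIT on top.
 */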
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;

	return 0;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
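/* Worked example: rss_size == 5 rounds up to 8, so the tc_size written to
 * hardware is ilog2(8) == 3; TC n is then offset by 5 * n queues, and the
 * real queue count stays bounded by the indirection table contents.
 */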
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}
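/* Fill example: with alloc_rss_size == 4 the table becomes
 * 0 1 2 3 0 1 2 3 ..., spreading hash results round robin over the
 * vport's allocated RSS queues.
 */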
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
			       HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
			       HCLGE_TQP_ID_S, node->tqp_index);
		hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_field(node->int_gl_idx,
					      HNAE3_RING_GL_IDX_M,
					      HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
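/* Chunking note: the ring chain is packed into tqp_type_and_id[] until
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries are used, at which point the
 * descriptor is sent and re-initialised with the same opcode and vector
 * id; any remainder (i > 0) goes out in a final, shorter descriptor.
 */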
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}
static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		ret = hclge_set_mac_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, true);

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, false);

	/* mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	hclge_update_link_status(hdev);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
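/* Bitmap example: function ids 0-191 live in desc[1].data[] and ids
 * 192-255 in desc[2].data[], 32 ids per 32 bit word; vfid 40 maps to
 * desc[1].data[1] bit 8 (40 / 32 == 1, 40 % 32 == 8).
 */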
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT)
		return hclge_add_mac_vlan_tbl(vport, &req, NULL);

	/* check if we just hit the duplicate */
	if (!ret)
		ret = -EINVAL;

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}
4285 static int hclge_rm_mc_addr(struct hnae3_handle
*handle
,
4286 const unsigned char *addr
)
4288 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4290 return hclge_rm_mc_addr_common(vport
, addr
);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
                            const unsigned char *addr)
{
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_vlan_tbl_entry_cmd req;
        enum hclge_cmd_status status;
        struct hclge_desc desc[3];
        u16 tbl_idx;

        /* mac addr check */
        if (!is_multicast_ether_addr(addr)) {
                dev_dbg(&hdev->pdev->dev,
                        "Remove mc mac err! invalid mac:%pM.\n",
                        addr);
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));
        hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
        hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (!status) {
                /* This mac addr exists, remove this handle's VFID for it */
                hclge_update_desc_vfid(desc, vport->vport_id, true);

                if (hclge_is_all_function_id_zero(desc))
                        /* All the vfids are zero, so delete this entry */
                        status = hclge_remove_mac_vlan_tbl(vport, &req);
                else
                        /* Not all the vfids are zero, update the vfids */
                        status = hclge_add_mac_vlan_tbl(vport, &req, desc);
        } else {
                /* This mac addr does not exist, can't delete it */
                dev_err(&hdev->pdev->dev,
                        "Rm multicast mac addr failed, ret = %d.\n",
                        status);
                return -EIO;
        }

        /* Set MTA table for this MAC address */
        tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
        status = hclge_set_mta_table_item(vport, tbl_idx, false);

        return status;
}
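/* Firmware reports two levels of status for manager-table commands:
 * cmdq_resp says whether the command itself executed, while resp_code
 * carries the table-specific result that is decoded below.
 */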
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
                                              u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD             0
#define HCLGE_ETHERTYPE_ALREADY_ADD             1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
#define HCLGE_ETHERTYPE_KEY_CONFLICT            3

        int return_status = 0;

        if (cmdq_resp) {
                dev_err(&hdev->pdev->dev,
                        "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
                        cmdq_resp);
                return -EIO;
        }

        switch (resp_code) {
        case HCLGE_ETHERTYPE_SUCCESS_ADD:
        case HCLGE_ETHERTYPE_ALREADY_ADD:
                return_status = 0;
                break;
        case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for manager table overflow.\n");
                return_status = -EIO;
                break;
        case HCLGE_ETHERTYPE_KEY_CONFLICT:
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for key conflict.\n");
                return_status = -EIO;
                break;
        default:
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for undefined, code=%d.\n",
                        resp_code);
                return_status = -EIO;
        }

        return return_status;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
                             const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
        struct hclge_desc desc;
        u8 resp_code;
        u16 retval;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
        memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for cmd_send, ret =%d.\n",
                        ret);
                return ret;
        }

        resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
        retval = le16_to_cpu(desc.retval);

        return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}
static int init_mgr_tbl(struct hclge_dev *hdev)
{
        int ret;
        int i;

        for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
                ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "add mac ethertype failed, ret =%d.\n",
                                ret);
                        return ret;
                }
        }

        return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
                              bool is_first)
{
        const unsigned char *new_addr = (const unsigned char *)p;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int ret;

        /* mac addr check */
        if (is_zero_ether_addr(new_addr) ||
            is_broadcast_ether_addr(new_addr) ||
            is_multicast_ether_addr(new_addr)) {
                dev_err(&hdev->pdev->dev,
                        "Change uc mac err! invalid mac:%p.\n",
                        new_addr);
                return -EINVAL;
        }

        if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
                dev_warn(&hdev->pdev->dev,
                         "remove old uc mac address fail.\n");

        ret = hclge_add_uc_addr(handle, new_addr);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "add uc mac address fail, ret =%d.\n",
                        ret);

                if (!is_first &&
                    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
                        dev_err(&hdev->pdev->dev,
                                "restore uc mac address fail.\n");

                return -EIO;
        }

        ret = hclge_pause_addr_cfg(hdev, new_addr);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "configure mac pause address fail, ret =%d.\n",
                        ret);
                return -EIO;
        }

        ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

        return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
                                      bool filter_en)
{
        struct hclge_vlan_filter_ctrl_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

        req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
        req->vlan_fe = filter_en;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
                        ret);

        return ret;
}
#define HCLGE_FILTER_TYPE_VF            0
#define HCLGE_FILTER_TYPE_PORT          1

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
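/* The VF VLAN config command spreads its VF bitmap across two
 * descriptors of HCLGE_MAX_VF_BYTES (16) bytes each, i.e. 128 VF ids
 * per descriptor: vfid / 8 selects the byte and vfid % 8 the bit.
 */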
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
                                    bool is_kill, u16 vlan, u8 qos,
                                    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
        struct hclge_vlan_filter_vf_cfg_cmd *req0;
        struct hclge_vlan_filter_vf_cfg_cmd *req1;
        struct hclge_desc desc[2];
        u8 vf_byte_val;
        u8 vf_byte_off;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0],
                                   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
        hclge_cmd_setup_basic_desc(&desc[1],
                                   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

        vf_byte_off = vfid / 8;
        vf_byte_val = 1 << (vfid % 8);

        req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
        req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

        req0->vlan_id = cpu_to_le16(vlan);
        req0->vlan_cfg = is_kill;

        if (vf_byte_off < HCLGE_MAX_VF_BYTES)
                req0->vf_bitmap[vf_byte_off] = vf_byte_val;
        else
                req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

        ret = hclge_cmd_send(&hdev->hw, desc, 2);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Send vf vlan command fail, ret =%d.\n",
                        ret);
                return ret;
        }

        if (!is_kill) {
                if (!req0->resp_code || req0->resp_code == 1)
                        return 0;

                dev_err(&hdev->pdev->dev,
                        "Add vf vlan filter fail, ret =%d.\n",
                        req0->resp_code);
        } else {
                if (!req0->resp_code)
                        return 0;

                dev_err(&hdev->pdev->dev,
                        "Kill vf vlan filter fail, ret =%d.\n",
                        req0->resp_code);
        }

        return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
                                      u16 vlan_id, bool is_kill)
{
        struct hclge_vlan_filter_pf_cfg_cmd *req;
        struct hclge_desc desc;
        u8 vlan_offset_byte_val;
        u8 vlan_offset_byte;
        u8 vlan_offset_160;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

        vlan_offset_160 = vlan_id / 160;
        vlan_offset_byte = (vlan_id % 160) / 8;
        vlan_offset_byte_val = 1 << (vlan_id % 8);

        req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
        req->vlan_offset = vlan_offset_160;
        req->vlan_cfg = is_kill;
        req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "port vlan command, send fail, ret =%d.\n", ret);
        return ret;
}
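/* hdev->vlan_table[] records which vports are members of each VLAN id.
 * The port-level filter is only programmed on the globally visible
 * transitions: the first vport joining a VLAN and the last one leaving.
 */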
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
                                    u16 vport_id, u16 vlan_id, u8 qos,
                                    bool is_kill)
{
        u16 vport_idx, vport_num = 0;
        int ret;

        ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
                                       0, proto);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Set %d vport vlan filter config fail, ret =%d.\n",
                        vport_id, ret);
                return ret;
        }

        /* vlan 0 may be added twice when 8021q module is enabled */
        if (!is_kill && !vlan_id &&
            test_bit(vport_id, hdev->vlan_table[vlan_id]))
                return 0;

        if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
                dev_err(&hdev->pdev->dev,
                        "Add port vlan failed, vport %d is already in vlan %d\n",
                        vport_id, vlan_id);
                return -EINVAL;
        }

        if (is_kill &&
            !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
                dev_err(&hdev->pdev->dev,
                        "Delete port vlan failed, vport %d is not in vlan %d\n",
                        vport_id, vlan_id);
                return -EINVAL;
        }

        for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
                vport_num++;

        if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
                ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
                                                 is_kill);

        return ret;
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
                          u16 vlan_id, bool is_kill)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
                                        0, is_kill);
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
                                    u16 vlan, u8 qos, __be16 proto)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
                return -EINVAL;
        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
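/* As with the VF VLAN filter, the vport tag-offload commands address a
 * single vport through a bitmap: vport_id / HCLGE_VF_NUM_PER_CMD picks
 * the byte offset and vport_id % HCLGE_VF_NUM_PER_BYTE the bit.
 */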
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
        struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
        struct hclge_vport_vtag_tx_cfg_cmd *req;
        struct hclge_dev *hdev = vport->back;
        struct hclge_desc desc;
        int status;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

        req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
        req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
        req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
                     vcfg->accept_tag1 ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
                     vcfg->accept_untag1 ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
                     vcfg->accept_tag2 ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
                     vcfg->accept_untag2 ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
                     vcfg->insert_tag1_en ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
                     vcfg->insert_tag2_en ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        req->vf_bitmap[req->vf_offset] =
                1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Send port txvlan cfg command fail, ret =%d\n",
                        status);

        return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
        struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
        struct hclge_vport_vtag_rx_cfg_cmd *req;
        struct hclge_dev *hdev = vport->back;
        struct hclge_desc desc;
        int status;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

        req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
                     vcfg->strip_tag1_en ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
                     vcfg->strip_tag2_en ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
                     vcfg->vlan1_vlan_prionly ? 1 : 0);
        hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
                     vcfg->vlan2_vlan_prionly ? 1 : 0);

        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        req->vf_bitmap[req->vf_offset] =
                1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Send port rxvlan cfg command fail, ret =%d\n",
                        status);

        return status;
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
        struct hclge_rx_vlan_type_cfg_cmd *rx_req;
        struct hclge_tx_vlan_type_cfg_cmd *tx_req;
        struct hclge_desc desc;
        int status;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
        rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
        rx_req->ot_fst_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
        rx_req->ot_sec_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
        rx_req->in_fst_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
        rx_req->in_sec_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "Send rxvlan protocol type command fail, ret =%d\n",
                        status);
                return status;
        }

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

        tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
        tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
        tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Send txvlan protocol type command fail, ret =%d\n",
                        status);

        return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE     0x8100

        struct hnae3_handle *handle;
        struct hclge_vport *vport;
        int ret;
        int i;

        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
        if (ret)
                return ret;

        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
        if (ret)
                return ret;

        hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
        hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
        hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
        hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
        hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
        hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

        ret = hclge_set_vlan_protocol_type(hdev);
        if (ret)
                return ret;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                vport->txvlan_cfg.accept_tag1 = true;
                vport->txvlan_cfg.accept_untag1 = true;

                /* accept_tag2 and accept_untag2 are not supported on
                 * pdev revision(0x20); the new revision supports them.
                 * Setting these two fields does not return an error
                 * when the driver sends the command to revision(0x20)
                 * firmware, and they cannot be configured by the user.
                 */
                vport->txvlan_cfg.accept_tag2 = true;
                vport->txvlan_cfg.accept_untag2 = true;

                vport->txvlan_cfg.insert_tag1_en = false;
                vport->txvlan_cfg.insert_tag2_en = false;
                vport->txvlan_cfg.default_tag1 = 0;
                vport->txvlan_cfg.default_tag2 = 0;

                ret = hclge_set_vlan_tx_offload_cfg(vport);
                if (ret)
                        return ret;

                vport->rxvlan_cfg.strip_tag1_en = false;
                vport->rxvlan_cfg.strip_tag2_en = true;
                vport->rxvlan_cfg.vlan1_vlan_prionly = false;
                vport->rxvlan_cfg.vlan2_vlan_prionly = false;

                ret = hclge_set_vlan_rx_offload_cfg(vport);
                if (ret)
                        return ret;
        }

        handle = &hdev->vport[0].nic;
        return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        vport->rxvlan_cfg.strip_tag1_en = false;
        vport->rxvlan_cfg.strip_tag2_en = enable;
        vport->rxvlan_cfg.vlan1_vlan_prionly = false;
        vport->rxvlan_cfg.vlan2_vlan_prionly = false;

        return hclge_set_vlan_rx_offload_cfg(vport);
}
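/* The firmware takes a max frame size rather than an MTU, so the L2
 * overhead is added back on: a standard 1500 byte MTU becomes
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */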
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
{
        struct hclge_config_max_frm_size_cmd *req;
        struct hclge_desc desc;
        int max_frm_size;
        int ret;

        max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
            max_frm_size > HCLGE_MAC_MAX_FRAME)
                return -EINVAL;

        max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

        req = (struct hclge_config_max_frm_size_cmd *)desc.data;
        req->max_frm_size = cpu_to_le16(max_frm_size);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
                return ret;
        }

        hdev->mps = max_frm_size;

        return 0;
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int ret;

        ret = hclge_set_mac_mtu(hdev, new_mtu);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Change mtu fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_buffer_alloc(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Allocate buffer fail, ret =%d\n", ret);

        return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
                                    bool enable)
{
        struct hclge_reset_tqp_queue_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

        req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
        req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
        hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Send tqp reset cmd error, status =%d\n", ret);

        return ret;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
        struct hclge_reset_tqp_queue_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

        req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
        req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get reset status error, status =%d\n", ret);
                return ret;
        }

        return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
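/* Queue ids seen through a handle are local to that handle's vport;
 * the TQP reset commands below address the global queue id, so the
 * helper that follows maps one to the other via the hclge_tqp entry.
 */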
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
                                          u16 queue_id)
{
        struct hnae3_queue *queue;
        struct hclge_tqp *tqp;

        queue = handle->kinfo.tqp[queue_id];
        tqp = container_of(queue, struct hclge_tqp, q);

        return tqp->index;
}
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int reset_try_times = 0;
        int reset_status;
        u16 queue_gid;
        int ret;

        if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
                return;

        queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

        ret = hclge_tqp_enable(hdev, queue_id, 0, false);
        if (ret) {
                dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
                return;
        }

        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
        if (ret) {
                dev_warn(&hdev->pdev->dev,
                         "Send reset tqp cmd fail, ret = %d\n", ret);
                return;
        }

        reset_try_times = 0;
        while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
                /* Wait for tqp hw reset */
                msleep(20);
                reset_status = hclge_get_reset_status(hdev, queue_gid);
                if (reset_status)
                        break;
        }

        if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
                dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
                return;
        }

        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
        if (ret)
                dev_warn(&hdev->pdev->dev,
                         "Deassert the soft reset fail, ret = %d\n", ret);
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
        struct hclge_dev *hdev = vport->back;
        int reset_try_times = 0;
        int reset_status;
        u16 queue_gid;
        int ret;

        queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
        if (ret) {
                dev_warn(&hdev->pdev->dev,
                         "Send reset tqp cmd fail, ret = %d\n", ret);
                return;
        }

        reset_try_times = 0;
        while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
                /* Wait for tqp hw reset */
                msleep(20);
                reset_status = hclge_get_reset_status(hdev, queue_gid);
                if (reset_status)
                        break;
        }

        if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
                dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
                return;
        }

        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
        if (ret)
                dev_warn(&hdev->pdev->dev,
                         "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return hdev->fw_version;
}
static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
                                   u32 *flowctrl_adv)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct phy_device *phydev = hdev->hw.mac.phydev;

        if (!phydev)
                return;

        *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
                         (phydev->advertising & ADVERTISED_Asym_Pause);
}
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
        struct phy_device *phydev = hdev->hw.mac.phydev;

        if (!phydev)
                return;

        phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

        if (rx_en)
                phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

        if (tx_en)
                phydev->advertising ^= ADVERTISED_Asym_Pause;
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
        int ret;

        if (rx_en && tx_en)
                hdev->fc_mode_last_time = HCLGE_FC_FULL;
        else if (rx_en && !tx_en)
                hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
        else if (!rx_en && tx_en)
                hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
        else
                hdev->fc_mode_last_time = HCLGE_FC_NONE;

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
                return 0;

        ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
        if (ret) {
                dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
                        ret);
                return ret;
        }

        hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

        return 0;
}
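/* Resolve the pause mode the PHY negotiated: local and link-partner
 * pause/asym-pause bits are combined via mii_resolve_flowctrl_fdx()
 * and the result is applied to the MAC, with pause forced off on
 * half-duplex links.
 */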
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
        struct phy_device *phydev = hdev->hw.mac.phydev;
        u16 remote_advertising = 0;
        u16 local_advertising = 0;
        u32 rx_pause, tx_pause;
        u8 flowctl;

        if (!phydev->link || !phydev->autoneg)
                return 0;

        if (phydev->advertising & ADVERTISED_Pause)
                local_advertising = ADVERTISE_PAUSE_CAP;

        if (phydev->advertising & ADVERTISED_Asym_Pause)
                local_advertising |= ADVERTISE_PAUSE_ASYM;

        if (phydev->pause)
                remote_advertising = LPA_PAUSE_CAP;

        if (phydev->asym_pause)
                remote_advertising |= LPA_PAUSE_ASYM;

        flowctl = mii_resolve_flowctrl_fdx(local_advertising,
                                           remote_advertising);
        tx_pause = flowctl & FLOW_CTRL_TX;
        rx_pause = flowctl & FLOW_CTRL_RX;

        if (phydev->duplex == HCLGE_MAC_HALF) {
                tx_pause = 0;
                rx_pause = 0;
        }

        return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
                                 u32 *rx_en, u32 *tx_en)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        *auto_neg = hclge_get_autoneg(handle);

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
                *rx_en = 0;
                *tx_en = 0;
                return;
        }

        if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
                *rx_en = 1;
                *tx_en = 0;
        } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
                *tx_en = 1;
                *rx_en = 0;
        } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
                *rx_en = 1;
                *tx_en = 1;
        } else {
                *rx_en = 0;
                *tx_en = 0;
        }
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
                                u32 rx_en, u32 tx_en)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct phy_device *phydev = hdev->hw.mac.phydev;
        u32 fc_autoneg;

        fc_autoneg = hclge_get_autoneg(handle);
        if (auto_neg != fc_autoneg) {
                dev_info(&hdev->pdev->dev,
                         "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
                return -EOPNOTSUPP;
        }

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
                dev_info(&hdev->pdev->dev,
                         "Priority flow control enabled. Cannot set link flow control.\n");
                return -EOPNOTSUPP;
        }

        hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

        if (!fc_autoneg)
                return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

        /* Only support flow control negotiation for netdev with
         * phy attached for now.
         */
        if (!phydev)
                return -EOPNOTSUPP;

        return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
                                          u8 *auto_neg, u32 *speed, u8 *duplex)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        if (speed)
                *speed = hdev->hw.mac.speed;
        if (duplex)
                *duplex = hdev->hw.mac.duplex;
        if (auto_neg)
                *auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        if (media_type)
                *media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
                                u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct phy_device *phydev = hdev->hw.mac.phydev;
        int mdix_ctrl, mdix, retval, is_resolved;

        if (!phydev) {
                *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
                *tp_mdix = ETH_TP_MDI_INVALID;
                return;
        }

        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

        retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
        mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
                                   HCLGE_PHY_MDIX_CTRL_S);

        retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
        mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
        is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

        switch (mdix_ctrl) {
        case 0x0:
                *tp_mdix_ctrl = ETH_TP_MDI;
                break;
        case 0x1:
                *tp_mdix_ctrl = ETH_TP_MDI_X;
                break;
        case 0x3:
                *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
                break;
        default:
                *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
                break;
        }

        if (!is_resolved)
                *tp_mdix = ETH_TP_MDI_INVALID;
        else if (mdix)
                *tp_mdix = ETH_TP_MDI_X;
        else
                *tp_mdix = ETH_TP_MDI;
}
static int hclge_init_client_instance(struct hnae3_client *client,
                                      struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_vport *vport;
        int i, ret;

        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                vport = &hdev->vport[i];

                switch (client->type) {
                case HNAE3_CLIENT_KNIC:

                        hdev->nic_client = client;
                        vport->nic.client = client;
                        ret = client->ops->init_instance(&vport->nic);
                        if (ret)
                                return ret;

                        if (hdev->roce_client &&
                            hnae3_dev_roce_supported(hdev)) {
                                struct hnae3_client *rc = hdev->roce_client;

                                ret = hclge_init_roce_base_info(vport);
                                if (ret)
                                        return ret;

                                ret = rc->ops->init_instance(&vport->roce);
                                if (ret)
                                        return ret;
                        }

                        break;
                case HNAE3_CLIENT_UNIC:
                        hdev->nic_client = client;
                        vport->nic.client = client;

                        ret = client->ops->init_instance(&vport->nic);
                        if (ret)
                                return ret;

                        break;
                case HNAE3_CLIENT_ROCE:
                        if (hnae3_dev_roce_supported(hdev)) {
                                hdev->roce_client = client;
                                vport->roce.client = client;
                        }

                        if (hdev->roce_client && hdev->nic_client) {
                                ret = hclge_init_roce_base_info(vport);
                                if (ret)
                                        return ret;

                                ret = client->ops->init_instance(&vport->roce);
                                if (ret)
                                        return ret;
                        }
                }
        }

        return 0;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
                                         struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_vport *vport;
        int i;

        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                vport = &hdev->vport[i];
                if (hdev->roce_client) {
                        hdev->roce_client->ops->uninit_instance(&vport->roce,
                                                                0);
                        hdev->roce_client = NULL;
                        vport->roce.client = NULL;
                }
                if (client->type == HNAE3_CLIENT_ROCE)
                        return;
                if (client->ops->uninit_instance) {
                        client->ops->uninit_instance(&vport->nic, 0);
                        hdev->nic_client = NULL;
                        vport->nic.client = NULL;
                }
        }
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        struct hclge_hw *hw;
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PCI device\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(&pdev->dev,
                                "can't set consistent PCI DMA");
                        goto err_disable_device;
                }
                dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
        }

        ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
        if (ret) {
                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
                goto err_disable_device;
        }

        pci_set_master(pdev);
        hw = &hdev->hw;
        hw->io_base = pcim_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "Can't map configuration register space\n");
                ret = -ENOMEM;
                goto err_clr_master;
        }

        hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

        return 0;
err_clr_master:
        pci_clear_master(pdev);
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);

        return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        pcim_iounmap(pdev, hdev->hw.io_base);
        pci_free_irq_vectors(pdev);
        pci_clear_master(pdev);
        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclge_dev *hdev;
        int ret;

        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev) {
                ret = -ENOMEM;
                goto out;
        }

        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        hdev->reset_type = HNAE3_NONE_RESET;
        hdev->reset_request = 0;
        hdev->reset_pending = 0;
        ae_dev->priv = hdev;

        ret = hclge_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI init failed\n");
                goto out;
        }

        /* Firmware command queue initialize */
        ret = hclge_cmd_queue_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
                goto err_pci_uninit;
        }

        /* Firmware command initialize */
        ret = hclge_cmd_init(hdev);
        if (ret)
                goto err_cmd_uninit;

        ret = hclge_get_cap(hdev);
        if (ret) {
                dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
                        ret);
                goto err_cmd_uninit;
        }

        ret = hclge_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
                goto err_cmd_uninit;
        }

        ret = hclge_init_msi(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
                goto err_cmd_uninit;
        }

        ret = hclge_misc_irq_init(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Misc IRQ(vector0) init error, ret = %d.\n",
                        ret);
                goto err_msi_uninit;
        }

        ret = hclge_alloc_tqps(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        ret = hclge_alloc_vport(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        ret = hclge_map_tqp(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
                ret = hclge_mac_mdio_config(hdev);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "mdio config fail ret=%d\n", ret);
                        goto err_msi_irq_uninit;
                }
        }

        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_tm_schd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        hclge_rss_init_cfg(hdev);
        ret = hclge_rss_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = init_mgr_tbl(hdev);
        if (ret) {
                dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        hclge_dcb_ops_set(hdev);

        timer_setup(&hdev->service_timer, hclge_service_timer, 0);
        INIT_WORK(&hdev->service_task, hclge_service_task);
        INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
        INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);

        set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
        clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);

        pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
        return 0;

err_mdiobus_unreg:
        if (hdev->hw.mac.phydev)
                mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
        hclge_misc_irq_uninit(hdev);
err_msi_uninit:
        pci_free_irq_vectors(pdev);
err_cmd_uninit:
        hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
        pcim_iounmap(pdev, hdev->hw.io_base);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
out:
        return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
        memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
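/* The post-reset path re-runs only the hardware-facing half of
 * hclge_init_ae_dev(); TQPs, vports, interrupt vectors and the MDIO
 * bus are kept from the original probe, so nothing is reallocated here.
 */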
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct pci_dev *pdev = ae_dev->pdev;
        int ret;

        set_bit(HCLGE_STATE_DOWN, &hdev->state);

        hclge_stats_clear(hdev);
        memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

        ret = hclge_cmd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Cmd queue init failed\n");
                return ret;
        }

        ret = hclge_get_cap(hdev);
        if (ret) {
                dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hclge_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
                return ret;
        }

        ret = hclge_map_tqp(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
                return ret;
        }

        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_tm_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_rss_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
                return ret;
        }

        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);

        dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);

        return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_mac *mac = &hdev->hw.mac;

        set_bit(HCLGE_STATE_DOWN, &hdev->state);

        if (hdev->service_timer.function)
                del_timer_sync(&hdev->service_timer);
        if (hdev->service_task.func)
                cancel_work_sync(&hdev->service_task);
        if (hdev->rst_service_task.func)
                cancel_work_sync(&hdev->rst_service_task);
        if (hdev->mbx_service_task.func)
                cancel_work_sync(&hdev->mbx_service_task);

        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);

        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
        hclge_destroy_cmd_queue(&hdev->hw);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
        ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
                               struct ethtool_channels *ch)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        ch->max_combined = hclge_get_max_channels(handle);
        ch->other_count = 1;
        ch->max_other = 1;
        ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
                                        u16 *free_tqps, u16 *max_rss_size)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u16 temp_tqps = 0;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                if (!hdev->htqp[i].alloced)
                        temp_tqps++;
        }
        *free_tqps = temp_tqps;
        *max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp =
                        container_of(kinfo->tqp[i], struct hclge_tqp, q);

                tqp->q.handle = NULL;
                tqp->q.tqp_index = 0;
                tqp->alloced = false;
        }

        devm_kfree(&hdev->pdev->dev, kinfo->tqp);
        kinfo->tqp = NULL;
}
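/* Changing the channel count rebuilds most of the datapath state: the
 * old TQPs are released, a new set is bound to the vport, the TM
 * scheduler is re-initialized and the RSS TC mode plus indirection
 * table are recomputed for the new rss_size.
 */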
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int cur_rss_size = kinfo->rss_size;
        int cur_tqps = kinfo->num_tqps;
        u16 tc_offset[HCLGE_MAX_TC_NUM];
        u16 tc_valid[HCLGE_MAX_TC_NUM];
        u16 tc_size[HCLGE_MAX_TC_NUM];
        u16 roundup_size;
        u32 *rss_indir;
        int ret, i;

        hclge_release_tqp(vport);

        ret = hclge_knic_setup(vport, new_tqps_num);
        if (ret) {
                dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_map_tqp_to_vport(hdev, vport);
        if (ret) {
                dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_tm_schd_init(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
                return ret;
        }

        roundup_size = roundup_pow_of_two(kinfo->rss_size);
        roundup_size = ilog2(roundup_size);
        /* Set the RSS TC mode according to the new RSS size */
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                tc_valid[i] = 0;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                tc_valid[i] = 1;
                tc_size[i] = roundup_size;
                tc_offset[i] = kinfo->rss_size * i;
        }
        ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
        if (ret)
                return ret;

        /* Reinitializes the rss indirect table according to the new RSS size */
        rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
        if (!rss_indir)
                return -ENOMEM;

        for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
                rss_indir[i] = i % kinfo->rss_size;

        ret = hclge_set_rss(handle, rss_indir, NULL, 0);
        if (ret)
                dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
                        ret);

        kfree(rss_indir);

        if (!ret)
                dev_info(&hdev->pdev->dev,
                         "Channels changed, rss_size from %d to %d, tqps from %d to %d",
                         cur_rss_size, kinfo->rss_size,
                         cur_tqps, kinfo->rss_size * kinfo->num_tc);

        return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
                              u32 *regs_num_64_bit)
{
        struct hclge_desc desc;
        u32 total_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query register number cmd failed, ret = %d.\n", ret);
                return ret;
        }

        *regs_num_32_bit = le32_to_cpu(desc.data[0]);
        *regs_num_64_bit = le32_to_cpu(desc.data[1]);

        total_num = *regs_num_32_bit + *regs_num_64_bit;
        if (!total_num)
                return -EINVAL;

        return 0;
}
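/* Register dumps come back packed into command descriptors. The first
 * descriptor loses two 32-bit data words to the command head, which is
 * why the descriptor count below is sized for "regs_num + 2".
 */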
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
                                 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

        struct hclge_desc *desc;
        u32 *reg_val = data;
        __le32 *desc_data;
        int cmd_num;
        int i, k, n;
        int ret;

        if (regs_num == 0)
                return 0;

        cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
        desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
        ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query 32 bit register cmd failed, ret = %d.\n", ret);
                kfree(desc);
                return ret;
        }

        for (i = 0; i < cmd_num; i++) {
                if (i == 0) {
                        desc_data = (__le32 *)(&desc[i].data[0]);
                        n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
                } else {
                        desc_data = (__le32 *)(&desc[i]);
                        n = HCLGE_32_BIT_REG_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *reg_val++ = le32_to_cpu(*desc_data++);

                        regs_num--;
                        if (!regs_num)
                                break;
                }
        }

        kfree(desc);
        return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
                                 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

        struct hclge_desc *desc;
        u64 *reg_val = data;
        __le64 *desc_data;
        int cmd_num;
        int i, k, n;
        int ret;

        if (regs_num == 0)
                return 0;

        cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
        desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
        ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query 64 bit register cmd failed, ret = %d.\n", ret);
                kfree(desc);
                return ret;
        }

        for (i = 0; i < cmd_num; i++) {
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_64_BIT_REG_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *reg_val++ = le64_to_cpu(*desc_data++);

                        regs_num--;
                        if (!regs_num)
                                break;
                }
        }

        kfree(desc);
        return 0;
}
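/* The ethtool regs blob is laid out as all 32-bit registers followed
 * by all 64-bit ones, so its length is the straight sum computed below.
 */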
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 regs_num_32_bit, regs_num_64_bit;
        int ret;

        ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get register number failed, ret = %d.\n", ret);
                return -EOPNOTSUPP;
        }

        return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
                           void *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 regs_num_32_bit, regs_num_64_bit;
        int ret;

        *version = hdev->fw_version;

        ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get register number failed, ret = %d.\n", ret);
                return;
        }

        ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get 32 bit register failed, ret = %d.\n", ret);
                return;
        }

        data = (u32 *)data + regs_num_32_bit;
        ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
                                    data);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Get 64 bit register failed, ret = %d.\n", ret);
}
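/* LED state fields treat HCLGE_LED_NO_CHANGE (0xFF) as "leave as is",
 * which lets callers update a single LED without querying the others.
 */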
static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
                                    u8 act_led_status, u8 link_led_status,
                                    u8 locate_led_status)
{
        struct hclge_set_led_state_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

        req = (struct hclge_set_led_state_cmd *)desc.data;
        hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
                       HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
        hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
                       HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
        hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
                       HCLGE_LED_LINK_STATE_S, link_led_status);
        hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
                       HCLGE_LED_LOCATE_STATE_S, locate_led_status);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Send set led state cmd error, ret =%d\n", ret);

        return ret;
}
enum hclge_led_status {
        HCLGE_LED_OFF,
        HCLGE_LED_ON,
        HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
                            enum ethtool_phys_id_state status)
{
#define BLINK_FREQUENCY         2
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct phy_device *phydev = hdev->hw.mac.phydev;
        int ret = 0;

        if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
                return -EOPNOTSUPP;

        switch (status) {
        case ETHTOOL_ID_ACTIVE:
                ret = hclge_set_led_status_sfp(hdev,
                                               HCLGE_LED_NO_CHANGE,
                                               HCLGE_LED_NO_CHANGE,
                                               HCLGE_LED_NO_CHANGE,
                                               HCLGE_LED_ON);
                break;
        case ETHTOOL_ID_INACTIVE:
                ret = hclge_set_led_status_sfp(hdev,
                                               HCLGE_LED_NO_CHANGE,
                                               HCLGE_LED_NO_CHANGE,
                                               HCLGE_LED_NO_CHANGE,
                                               HCLGE_LED_OFF);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
enum hclge_led_port_speed {
        HCLGE_SPEED_LED_FOR_1G,
        HCLGE_SPEED_LED_FOR_10G,
        HCLGE_SPEED_LED_FOR_25G,
        HCLGE_SPEED_LED_FOR_40G,
        HCLGE_SPEED_LED_FOR_50G,
        HCLGE_SPEED_LED_FOR_100G,
};
static u8 hclge_led_get_speed_status(u32 speed)
{
        u8 speed_led;

        switch (speed) {
        case HCLGE_MAC_SPEED_1G:
                speed_led = HCLGE_SPEED_LED_FOR_1G;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_led = HCLGE_SPEED_LED_FOR_10G;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_led = HCLGE_SPEED_LED_FOR_25G;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_led = HCLGE_SPEED_LED_FOR_40G;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_led = HCLGE_SPEED_LED_FOR_50G;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_led = HCLGE_SPEED_LED_FOR_100G;
                break;
        default:
                speed_led = HCLGE_LED_NO_CHANGE;
        }

        return speed_led;
}
static int hclge_update_led_status(struct hclge_dev *hdev)
{
        u8 port_speed_status, link_status, activity_status;
        u64 rx_pkts, tx_pkts;

        if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
                return 0;

        port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);

        rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
        tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
        if (rx_pkts != hdev->rx_pkts_for_led ||
            tx_pkts != hdev->tx_pkts_for_led)
                activity_status = HCLGE_LED_ON;
        else
                activity_status = HCLGE_LED_OFF;
        hdev->rx_pkts_for_led = rx_pkts;
        hdev->tx_pkts_for_led = tx_pkts;

        if (hdev->hw.mac.link)
                link_status = HCLGE_LED_ON;
        else
                link_status = HCLGE_LED_OFF;

        return hclge_set_led_status_sfp(hdev, port_speed_status,
                                        activity_status, link_status,
                                        HCLGE_LED_NO_CHANGE);
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
                                unsigned long *supported,
                                unsigned long *advertising)
{
        unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        unsigned int idx = 0;

        for (; idx < size; idx++) {
                supported[idx] = hdev->hw.mac.supported[idx];
                advertising[idx] = hdev->hw.mac.advertising[idx];
        }
}
static void hclge_get_port_type(struct hnae3_handle *handle,
                                u8 *port_type)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u8 media_type = hdev->hw.mac.media_type;

        switch (media_type) {
        case HNAE3_MEDIA_TYPE_FIBER:
                *port_type = PORT_FIBRE;
                break;
        case HNAE3_MEDIA_TYPE_COPPER:
                *port_type = PORT_TP;
                break;
        case HNAE3_MEDIA_TYPE_UNKNOWN:
        default:
                *port_type = PORT_OTHER;
                break;
        }
}
static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
        .init_client_instance = hclge_init_client_instance,
        .uninit_client_instance = hclge_uninit_client_instance,
        .map_ring_to_vector = hclge_map_ring_to_vector,
        .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
        .get_vector = hclge_get_vector,
        .put_vector = hclge_put_vector,
        .set_promisc_mode = hclge_set_promisc_mode,
        .set_loopback = hclge_set_loopback,
        .start = hclge_ae_start,
        .stop = hclge_ae_stop,
        .get_status = hclge_get_status,
        .get_ksettings_an_result = hclge_get_ksettings_an_result,
        .update_speed_duplex_h = hclge_update_speed_duplex_h,
        .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
        .get_media_type = hclge_get_media_type,
        .get_rss_key_size = hclge_get_rss_key_size,
        .get_rss_indir_size = hclge_get_rss_indir_size,
        .get_rss = hclge_get_rss,
        .set_rss = hclge_set_rss,
        .set_rss_tuple = hclge_set_rss_tuple,
        .get_rss_tuple = hclge_get_rss_tuple,
        .get_tc_size = hclge_get_tc_size,
        .get_mac_addr = hclge_get_mac_addr,
        .set_mac_addr = hclge_set_mac_addr,
        .add_uc_addr = hclge_add_uc_addr,
        .rm_uc_addr = hclge_rm_uc_addr,
        .add_mc_addr = hclge_add_mc_addr,
        .rm_mc_addr = hclge_rm_mc_addr,
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
        .set_pauseparam = hclge_set_pauseparam,
        .set_mtu = hclge_set_mtu,
        .reset_queue = hclge_reset_tqp,
        .get_stats = hclge_get_stats,
        .update_stats = hclge_update_stats,
        .get_strings = hclge_get_strings,
        .get_sset_count = hclge_get_sset_count,
        .get_fw_version = hclge_get_fw_version,
        .get_mdix_mode = hclge_get_mdix_mode,
        .enable_vlan_filter = hclge_enable_vlan_filter,
        .set_vlan_filter = hclge_set_vlan_filter,
        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
        .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
        .reset_event = hclge_reset_event,
        .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
        .set_channels = hclge_set_channels,
        .get_channels = hclge_get_channels,
        .get_flowctrl_adv = hclge_get_flowctrl_adv,
        .get_regs_len = hclge_get_regs_len,
        .get_regs = hclge_get_regs,
        .set_led_id = hclge_set_led_id,
        .get_link_mode = hclge_get_link_mode,
        .get_port_type = hclge_get_port_type,
};
static struct hnae3_ae_algo ae_algo = {
        .ops = &hclge_ops,
        .name = HCLGE_NAME,
        .pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
        pr_info("%s is initializing\n", HCLGE_NAME);

        hnae3_register_ae_algo(&ae_algo);

        return 0;
}

static void hclge_exit(void)
{
        hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);