// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
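
/* The statistics tables below pair an ethtool string with the byte offset of
 * the matching counter inside its stats structure, so a counter can be read
 * generically, e.g. (illustrative only; hclge_comm_get_stats() is the real
 * user of this pattern):
 *
 *	u64 v = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */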
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s",
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}

		count++;
		handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length sent to hardware is in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
*hdev
,
1459 struct hclge_pkt_buf_alloc
*buf_alloc
)
1461 /* TX buffer size is unit by 128 byte */
1462 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1463 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1464 struct hclge_tx_buff_alloc_cmd
*req
;
1465 struct hclge_desc desc
;
1469 req
= (struct hclge_tx_buff_alloc_cmd
*)desc
.data
;
1471 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_TX_BUFF_ALLOC
, 0);
1472 for (i
= 0; i
< HCLGE_TC_NUM
; i
++) {
1473 u32 buf_size
= buf_alloc
->priv_buf
[i
].tx_buf_size
;
1475 req
->tx_pkt_buff
[i
] =
1476 cpu_to_le16((buf_size
>> HCLGE_BUF_SIZE_UNIT_SHIFT
) |
1477 HCLGE_BUF_SIZE_UPDATE_EN_MSK
);
1480 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1482 dev_err(&hdev
->pdev
->dev
, "tx buffer alloc cmd failed %d.\n",
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc
*buf_alloc
)
1557 struct hclge_priv_buf
*priv
;
1561 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1562 priv
= &buf_alloc
->priv_buf
[i
];
1564 rx_priv
+= priv
->buf_size
;
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
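
/* NIC and RoCE share one MSI-X range; the NIC vectors come first and the
 * RoCE vectors start at roce_base_msix_offset (see hclge_query_pf_resource()),
 * which is what the base-vector math below relies on.
 */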
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
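
/* Half duplex is only meaningful at 10M/100M; for every other speed the MAC
 * is forced to full duplex regardless of what was requested.
 */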
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}

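/* For reference, the HCLGE_CFG_SPEED field encoding used above (read
 * straight from the switch cases, not from any separate spec):
 * 1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5,
 * 10M -> 6, 100M -> 7. The duplex setting travels in the same
 * speed_dup byte via HCLGE_CFG_DUPLEX_B.
 */
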
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);

	return ret;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
					   bool mask_vlan,
					   u8 *mac_mask)
{
	struct hclge_mac_vlan_mask_entry_cmd *req;
	struct hclge_desc desc;
	int status;

	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);

	hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
		      mask_vlan ? 1 : 0);
	ether_addr_copy(req->mac_mask, mac_mask);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
			status);

	return status;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct net_device *netdev = handle->kinfo.netdev;
	struct hclge_mac *mac = &hdev->hw.mac;
	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct hclge_vport *vport;
	int mtu;
	int ret;
	int i;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->accept_mta_mc = false;

		memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
		ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set mta filter mode fail ret=%d\n", ret);
			return ret;
		}
	}

	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set default mac_vlan_mask fail ret=%d\n", ret);
		return ret;
	}

	if (netdev)
		mtu = netdev->mtu;
	else
		mtu = ETH_DATA_LEN;

	ret = hclge_set_mtu(handle, mtu);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mtu failed ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	int speed;
	u8 duplex;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when no phy is attached
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

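/* Note: for HCLGE_VECTOR0_EVENT_RST the status bit is deliberately left
 * set and the misc vector stays masked here; the reset path clears the
 * cause and re-enables the vector later (see hclge_clear_reset_cause()),
 * so the interrupt cannot retrigger while the reset is being handled.
 */
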
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int ret;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &hdev->vport[i].nic;
		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle;
	int ret = 0;
	u16 i;

	if (!client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &hdev->vport[i].roce;
		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d", ret);
			return ret;
		}
	}

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;

	/* perform reset of the stack & ae device for a client */
	handle = &hdev->vport[0].nic;

	hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);

		hclge_clear_reset_cause(hdev);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	handle->last_reset_time = jiffies;

	hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
}

static void hclge_reset_event(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* check if this is a new reset request and we are not here just because
	 * last reset attempt did not succeed and watchdog hit us again. We will
	 * know this if last reset request did not occur very recently (watchdog
	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ)
	 * In case of new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we will
	 * not allow it again before 3*HZ times.
	 */
	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
		return;
	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
		handle->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
		 handle->reset_level);

	/* request reset & schedule reset task */
	set_bit(handle->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (handle->reset_level < HNAE3_GLOBAL_RESET)
		handle->reset_level++;
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_service_complete(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

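/* Illustrative layout of the per-vector doorbell address computed above,
 * assuming the register constants keep their usual meaning: vector 0 is
 * reserved for the misc interrupt, so ring vector i (i >= 1) lives at
 *
 *   io_base + HCLGE_VECTOR_REG_BASE
 *           + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *           + vport_id * HCLGE_VECTOR_VF_OFFSET
 *
 * i.e. a fixed stride per vector plus a per-function offset.
 */
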
static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = vport->rss_algo;

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with user specified qids */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

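/* Mapping used above between the ethtool RXH_* request bits and the
 * hardware tuple bits: RXH_L4_B_0_1 -> HCLGE_S_PORT_BIT,
 * RXH_L4_B_2_3 -> HCLGE_D_PORT_BIT, RXH_IP_SRC -> HCLGE_S_IP_BIT,
 * RXH_IP_DST -> HCLGE_D_IP_BIT; SCTP flows additionally hash the
 * verification tag (HCLGE_V_TAG_BIT).
 */
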
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

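/* Worked example for the tc_size computation above: with rss_size = 10,
 * roundup_pow_of_two(10) = 16 and ilog2(16) = 4, so the hardware is told
 * tc_size = 4 (i.e. 2^4 = 16 queue slots per TC) while the indirection
 * table still only spreads traffic across the 10 real queues.
 */
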
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

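/* Example of the default fill above: with alloc_rss_size = 16 the
 * indirection table becomes 0, 1, ..., 15, 0, 1, ... so hash buckets are
 * spread round-robin across all allocated RSS queues.
 */
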
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

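/* The ring chain is flushed to firmware in batches: once i reaches
 * HCLGE_VECTOR_ELEMENTS_PER_CMD the descriptor is sent and reinitialised,
 * and any remainder (i > 0 after the loop) goes out in a final command,
 * so arbitrarily long chains map onto fixed-size descriptors.
 */
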
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				   bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
				 vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100
	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;

	req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	if (en) {
		req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	} else {
		req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	return 0;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		ret = hclge_set_mac_loopback(hdev, en);
		break;
	case HNAE3_MAC_INTER_LOOP_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, true);

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	hclge_update_link_status(hdev);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

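/* Byte-packing example for the helper above: for the (made-up) address
 * 00:11:22:33:44:55, high_val = 0x33221100 (bytes 3,2,1,0) and
 * low_val = 0x5544 (bytes 5,4), matching the little-endian layout the
 * hardware expects after the cpu_to_le32()/cpu_to_le16() conversions.
 */
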
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}

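/* Index derivation sketch for the helper above: high_val holds the top
 * two MAC bytes (addr[0] most significant, i.e. bits 47..32). With the
 * HCLGE_MAC_ADDR_47_36 selector configured in hclge_mac_init() (assumed
 * here to have numeric value 0, so rsh = 4), the 12-bit MTA index is
 * bits 47..36 of the address.
 */
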
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		      enable);
	hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
			HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		      enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
			HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	if (enable)
		set_bit(idx, vport->mta_shadow);
	else
		clear_bit(idx, vport->mta_shadow);

	return 0;
}

static int hclge_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct net_device *netdev = handle->kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	memset(mta_status, 0, sizeof(mta_status));

	/* update mta_status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclge_update_mta_status_common(vport, mta_status,
					      0, HCLGE_MTA_TBL_SIZE, true);
}

int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx,
				   u16 count,
				   bool update_filter)
{
	struct hclge_dev *hdev = vport->back;
	u16 update_max = idx + count;
	u16 check_max;
	int ret = 0;
	bool used;
	u16 i;

	/* setup mta check range */
	if (update_filter) {
		i = 0;
		check_max = HCLGE_MTA_TBL_SIZE;
	} else {
		i = idx;
		check_max = update_max;
	}

	used = false;
	/* check and update all mta item */
	for (; i < check_max; i++) {
		/* ignore unused item */
		if (!test_bit(i, vport->mta_shadow))
			continue;

		/* if i in update range then update it */
		if (i >= idx && i < update_max)
			if (!test_bit(i - idx, status))
				hclge_set_mta_table_item(vport, i, false);

		if (!used && test_bit(i, vport->mta_shadow))
			used = true;
	}

	/* no longer use mta, disable it */
	if (vport->accept_mta_mc && update_filter && !used) {
		ret = hclge_cfg_func_mta_filter(hdev,
						vport->vport_id,
						false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"disable func mta filter fail ret=%d\n",
				ret);
		} else {
			vport->accept_mta_mc = false;
		}
	}

	return ret;
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

4287 static int hclge_add_uc_addr(struct hnae3_handle
*handle
,
4288 const unsigned char *addr
)
4290 struct hclge_vport
*vport
= hclge_get_vport(handle
);
4292 return hclge_add_uc_addr_common(vport
, addr
);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT)
		return hclge_add_mac_vlan_tbl(vport, &req, NULL);

	/* check if we just hit the duplicate */
	if (!ret)
		ret = -EINVAL;

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
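/* Worked example (illustrative): hnae3_set_field() packs the vport id into
 * the VFID field of egress_port. For vport_id = 3, the bits covered by
 * HCLGE_MAC_EPORT_VFID_M (starting at HCLGE_MAC_EPORT_VFID_S) become 3 and
 * all other bits stay 0, marking the table entry as owned by vport 3.
 */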
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists; update the VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist; add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* If the mc mac vlan table is full, use the MTA table */
	if (status == -ENOSPC) {
		if (!vport->accept_mta_mc) {
			status = hclge_cfg_func_mta_filter(hdev,
							   vport->vport_id,
							   true);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"set mta filter mode fail ret=%d\n",
					status);
				return status;
			}
			vport->accept_mta_mc = true;
		}

		/* Set MTA table for this MAC address */
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
		status = hclge_set_mta_table_item(vport, tbl_idx, true);
	}

	return status;
}
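/* Note (illustrative, inferred from the code above): when the MAC/VLAN
 * table is full (-ENOSPC) the address falls back to the MTA. The address
 * is mapped to a table index and that index is enabled; one MTA entry
 * covers a whole range of addresses, which is why removal is deferred
 * (see the comment in hclge_rm_mc_addr_common() below).
 */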
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists; remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the VFIDs are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the VFIDs are zero; update the VFIDs */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an entry of the mta represents an
		 * address range rather than a specific address. The delete
		 * action for all entries will take effect in
		 * update_mta_status, called by hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}
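/* Illustrative note on the response layout used above and in the MAC/VLAN
 * helpers: the firmware places a per-command status byte in bits 8-15 of
 * the first data word, and the command-queue return code in desc.retval.
 * For example, if desc.data[0] reads 0x200, then (0x200 >> 8) & 0xff == 2,
 * which hclge_get_mac_ethertype_cmd_status() maps to "manager table
 * overflow".
 */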
static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
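/* Worked example (illustrative): the 16-byte vf_bitmap of each descriptor
 * covers 128 functions. For vfid = 130:
 *	vf_byte_off = 130 / 8 = 16
 *	vf_byte_val = 1 << (130 % 8) = 0x04
 * Since 16 >= HCLGE_MAX_VF_BYTES, the bit lands in the second descriptor
 * at req1->vf_bitmap[16 - 16].
 */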
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
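/* Worked example (illustrative): each PF command addresses a window of
 * 160 VLAN ids. For vlan_id = 200:
 *	vlan_offset_160      = 200 / 160       = 1
 *	vlan_offset_byte     = (200 % 160) / 8 = 5
 *	vlan_offset_byte_val = 1 << (200 % 8)  = 0x01
 * so the command selects window 1 and sets bit 0 of bitmap byte 5.
 */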
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when the 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
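/* Note (illustrative, inferred from the code above): hdev->vlan_table[]
 * acts as a per-VLAN membership bitmap over vports, effectively a
 * reference count. The port-level filter entry is only written when the
 * first vport joins a VLAN (vport_num == 1 on add) and only cleared when
 * the last one leaves (vport_num == 0 after the kill).
 */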
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
					0, is_kill);
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle;
	struct hclge_vport *vport;
	int ret;
	int i;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
	if (ret)
		return ret;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.accept_untag1 = true;

		/* accept_tag2 and accept_untag2 are not supported on
		 * pdev revision(0x20); newer revisions support them.
		 * Setting these two fields does not return an error
		 * when the driver sends the command to firmware on
		 * revision(0x20). These two fields cannot be configured
		 * by the user.
		 */
		vport->txvlan_cfg.accept_tag2 = true;
		vport->txvlan_cfg.accept_untag2 = true;

		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	handle = &hdev->vport[0].nic;
	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;
	int max_frm_size;
	int ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(max_frm_size);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	hdev->mps = max_frm_size;

	return 0;
}
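/* Worked example (illustrative): for new_mtu = 1500,
 *	max_frm_size = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4)
 *		     = 1522 bytes,
 * the classic maximum size of an 802.1Q tagged frame.
 */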
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_set_mac_mtu(hdev, new_mtu);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

	return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
					  u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
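/* Note (illustrative summary of the sequence above): a TQP reset is
 * disable queue -> assert the reset request -> poll the ready_to_reset
 * bit up to HCLGE_TQP_RESET_TRY_TIMES while the hardware drains the
 * queue -> deassert the request. hclge_reset_vf_queue() below follows
 * the same protocol minus the local queue disable.
 */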
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
				   u32 *flowctrl_adv)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	*flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
			 (phydev->advertising & ADVERTISED_Asym_Pause);
}
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (rx_en)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	if (tx_en)
		phydev->advertising ^= ADVERTISED_Asym_Pause;
}
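/* Pause advertising encoding (illustrative truth table for the flag
 * juggling above):
 *	rx_en tx_en -> Pause Asym_Pause
 *	  1     1       1       0	(symmetric pause)
 *	  1     0       1       1	(rx only)
 *	  0     1       0       1	(tx only)
 *	  0     0       0       0	(none)
 * The XOR flips Asym_Pause relative to the rx_en setting when tx is
 * enabled.
 */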
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	if (phydev->advertising & ADVERTISED_Pause)
		local_advertising = ADVERTISE_PAUSE_CAP;

	if (phydev->advertising & ADVERTISED_Asym_Pause)
		local_advertising |= ADVERTISE_PAUSE_ASYM;

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				return ret;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					return ret;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					return ret;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				return ret;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					return ret;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *free_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 temp_tqps = 0;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		if (!hdev->htqp[i].alloced)
			temp_tqps++;
	}
	*free_tqps = temp_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	/* Free old tqps, and reallocate with the new tqp number when
	 * the nic is set up
	 */
	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitialize the RSS indirection table for the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
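/* Worked example (illustrative): if the new rss_size is 6, then
 * roundup_pow_of_two(6) = 8 and ilog2(8) = 3, so each enabled TC gets
 * tc_size = 3 (i.e. 2^3 = 8 queue slots) with tc_offset at multiples of
 * 6, and the indirection table cycles i % 6 over the active queues.
 */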
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
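/* Note on the payload layout assumed above (inferred from the loop): the
 * first descriptor loses two 32-bit words to the request header, leaving
 * 6 register values; each subsequent descriptor is reused wholesale as 8
 * values, hence cmd_num = DIV_ROUND_UP(regs_num + 2, 8). The 64-bit
 * variant below is analogous with 4-word descriptors and one reserved
 * word.
 */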
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.update_mta_status = hclge_update_mta_status,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);