/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
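/* Layout note for the update-stats helpers below: each statistics query
 * returns a chain of 32-byte descriptors.  Only desc[0] keeps its command
 * header, so it carries fewer data words than the continuation
 * descriptors, which the firmware reuses verbatim as raw counter payload;
 * that is why the copy loops treat i == 0 specially.
 */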
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
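/* The pkt_curr_buf counters report current buffer occupancy rather than a
 * running total, so they are zeroed before each accumulating update;
 * otherwise the "+=" pass in hclge_32_bit_update_stats() would grow them
 * without bound.
 */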
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
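/* Per-queue statistics are fetched with one descriptor per queue: the
 * queue index, masked to the 9-bit field the firmware expects (0x1ff),
 * goes into data[0] of the request and the packet count comes back in
 * data[1] of the same descriptor.
 */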
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp exposes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
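/* Both helpers above return a cursor advanced past what they wrote, so
 * callers can chain them to fill one flat ethtool buffer section by
 * section, as hclge_get_stats() and hclge_get_strings() do below.
 */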
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
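/* HCLGE_STATE_STATISTICS_UPDATING acts as a simple lock: when another
 * updater already holds the bit, hclge_update_stats() returns without
 * touching the counters instead of blocking.
 */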
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
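/* In hclge_parse_cfg() the MAC address arrives split across two config
 * words: param[2] carries the low 32 bits and param[3] the high bits.
 * The merge is written as (high << 31) << 1 rather than a single shift
 * by 32, presumably as defensive coding, since shifting a 32-bit value
 * by its full width would be undefined.
 */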
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length is expressed in units of 4 bytes when sent to
		 * the hardware.
		 */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
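/* hclge_assign_tqp() above hands out queues first-fit from the global
 * hdev->htqp pool: each vport claims the first still-unallocated TQPs it
 * finds and records the mapping through q.handle and q.tqp_index.
 */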
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to the default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);

	return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
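/* The check above sizes the shared pool as max(shared_buf_min,
 * shared_buf_tc): a floor of 2 * MPS plus the device-variable headroom,
 * or one full MPS per PFC-enabled TC plus half an MPS per remaining TC
 * plus one extra MPS, whichever is larger.  Only what is left of rx_all
 * after the private buffers may back it.
 */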
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						 HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT flag to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT flag to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
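/* hclge_buffer_alloc() below applies the calculations in a fixed order:
 * TX buffers are sized and committed first, then the RX private buffers;
 * the per-TC waterlines and common thresholds are programmed only on
 * DCB-capable hardware, and the common waterline is always written last.
 */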
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 HCLGE_ROCE_VECTOR_OFFSET;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
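/* Vector bookkeeping established above: NIC MSI/MSI-X vectors occupy the
 * front of the allocated range and RoCE vectors start at
 * HCLGE_ROCE_VECTOR_OFFSET, matching the split assumed earlier in
 * hclge_query_pf_resource().
 */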
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}
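/* Firmware speed encoding used by the switch above and decoded by
 * hclge_parse_speed(): 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G,
 * 5 = 100G, 6 = 10M, 7 = 100M.
 */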
2133 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle
*handle
, int speed
,
2136 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2137 struct hclge_dev
*hdev
= vport
->back
;
2139 return hclge_cfg_mac_speed_dup(hdev
, speed
, duplex
);
static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					int *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
					   bool mask_vlan,
					   u8 *mac_mask)
{
	struct hclge_mac_vlan_mask_entry_cmd *req;
	struct hclge_desc desc;
	int status;

	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);

	hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
		     mask_vlan ? 1 : 0);
	ether_addr_copy(req->mac_mask, mac_mask);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
			status);

	return status;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct net_device *netdev = handle->kinfo.netdev;
	struct hclge_mac *mac = &hdev->hw.mac;
	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	int mtu;
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc	= true;
	hdev->enable_mta	= true;
	hdev->mta_mac_sel_type	= HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set mta filter mode fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set default mac_vlan_mask fail ret=%d\n", ret);
		return ret;
	}

	if (netdev)
		mtu = netdev->mtu;
	else
		mtu = ETH_DATA_LEN;

	ret = hclge_set_mtu(handle, mtu);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set mtu failed ret=%d\n", ret);
		return ret;
	}

	return 0;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}
static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	int speed, duplex;
	int ret;

	/* get the speed and duplex as autoneg result from the mac cmd when
	 * the phy doesn't exist.
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}
static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	}
}
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_dbg(&hdev->pdev->dev,
			"received unknown or unhandled event of vector0\n");
		break;
	}

	/* we should clear the source of interrupt */
	hclge_clear_event_cause(hdev, event_cause, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}
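/* Reset proceeds in four steps: notify the client DOWN, wait for the
 * hardware reset to complete, re-initialize the ae device (UNINIT ->
 * INIT notifications around hclge_reset_ae_dev()), then notify UP.
 * If the hardware has not finished in time, the pending bit is set
 * again and the reset task is rescheduled.
 */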
static void hclge_reset(struct hclge_dev *hdev)
{
	/* perform reset of the stack & ae device for a client */

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		rtnl_lock();
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
		rtnl_unlock();
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
static void hclge_reset_event(struct hnae3_handle *handle,
			      enum hnae3_reset_type reset)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	dev_info(&hdev->pdev->dev,
		 "Receive reset event, reset_type is %d", reset);

	switch (reset) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
		/* request reset & schedule reset task */
		set_bit(reset, &hdev->reset_request);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
		break;
	}
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_service_complete(hdev);
}
static void hclge_disable_sriov(struct hclge_dev *hdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(hdev->pdev)) {
		dev_warn(&hdev->pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(hdev->pdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
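/* Vector allocation starts at index 1 (index 0 is the misc vector).
 * vector_status[] maps each MSI-X entry to its owning vport, or
 * HCLGE_INVALID_VPORT when free; vector_irq[] remembers the Linux irq
 * number so hclge_get_vector_index() can translate it back to an index.
 */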
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int rss_hash_algo;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get link status error, status =%d\n", ret);
		return ret;
	}

	req = (struct hclge_rss_config_cmd *)desc.data;
	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);

	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
		return ETH_RSS_HASH_TOP;

	return -EINVAL;
}
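/* The RSS hash key is programmed with three HCLGE_OPC_RSS_GENERIC_CONFIG
 * descriptors of HCLGE_RSS_HASH_KEY_NUM bytes each; the third descriptor
 * carries the remainder (HCLGE_RSS_KEY_SIZE - 2 * HCLGE_RSS_HASH_KEY_NUM).
 */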
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = hclge_get_rss_algo(hdev);

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		/* Update the shadow RSS key with user specified qids */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	ret = hclge_set_rss_indir_table(hdev, indir);
	return ret;
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);

	return ret;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = req->ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = req->ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = req->ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = req->ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = req->ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = req->ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
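/* RSS initialization: spread the indirection table round-robin over each
 * vport's alloc_rss_size queues, program the Toeplitz key and input
 * tuples, then set the per-TC mode where tc_size is the log2 of rss_size
 * rounded up to a power of two.
 */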
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u8 rss_key[HCLGE_RSS_KEY_SIZE];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u32 *rss_indir = NULL;
	u16 rss_size = 0, roundup_size;
	const u8 *key;
	int i, ret, j;

	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	/* Get default RSS key */
	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);

	/* Initialize RSS indirect table for each vport */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;

			/* vport 0 is for PF */
			if (j != 0)
				continue;

			rss_size = vport[j].alloc_rss_size;
			rss_indir[i] = vport[j].rss_indirection_tbl[i];
		}
	}
	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		goto err;

	key = rss_key;
	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		goto err;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		goto err;

	/* Each TC has the same queue size, and tc_size set to hardware is
	 * the log2 of roundup power of two of rss_size, the actual queue
	 * size is limited by indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

err:
	kfree(rss_indir);

	return ret;
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
			       HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
			       HCLGE_TQP_ID_S, node->tqp_index);
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id,
			ret);
		return ret;
	}

	/* Free this MSIX or MSI vector */
	hclge_free_vector(hdev, vector_id);

	return 0;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
		/* 1 Read out the MAC mode config at first */
		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_CONFIG_MAC_MODE,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac loopback get fail, ret =%d.\n",
				ret);
			return ret;
		}

		/* 2 Then setup the loopback flag */
		loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
		if (en)
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
		else
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);

		req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

		/* 3 Config mac work mode with loopback flag
		 * and its original configure parameters
		 */
		hclge_cmd_reuse_desc(&desc, false);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"mac loopback set fail, ret =%d.\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id, ret;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* todo clear interrupt */
		/* ring enable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, true);
	}
	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* Ring disable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, false);
	}
	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EIO;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
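/* The mac_vlan table entry keeps its function bitmap spread over the
 * command descriptors: vfid 0-191 live in desc[1].data, vfid 192-255 in
 * desc[2].data, 32 function bits per data word.
 */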
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
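/* The 6-byte MAC address is packed little-endian into the table entry:
 * bytes 0-3 into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16. For the
 * MTA index, the top bits of the address are shifted down by
 * (4 - mta_mac_sel_type) and masked to a 12-bit table index.
 */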
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	u16 egress_port = 0;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	status = hclge_add_mac_vlan_tbl(vport, &req, NULL);

	return status;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_remove_mac_vlan_tbl(vport, &req);

	return status;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* This mac addr does not exist, can't delete it */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%p.\n",
			 new_addr);
		return -EINVAL;
	}

	ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail, ret =%d.\n",
			 ret);

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail, ret =%d.\n",
				ret);
		}

		return -EIO;
	}

	ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
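/* The VF vlan filter command carries its VF bitmap split across two
 * descriptors (HCLGE_MAX_VF_BYTES each); the bit for this vfid is set in
 * whichever descriptor covers it, and the firmware response code in req0
 * reports the add/kill result.
 */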
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
				      __be16 proto, u16 vlan_id,
				      bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n",
			ret);
		return ret;
	}

	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	return 0;
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
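/* Push this vport's TX VLAN-tag offload settings (default tags, tag
 * insertion enables, accept tagged/untagged) to hardware. The target
 * vport is addressed through vf_offset/vf_bitmap using the same
 * byte-and-bit scheme as the VF VLAN filter command.
 */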
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
		     vcfg->accept_tag ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
		     vcfg->accept_untag ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE		0x8100

	struct hnae3_handle *handle;
	struct hclge_vport *vport;
	int ret;
	int i;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
	if (ret)
		return ret;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag = true;
		vport->txvlan_cfg.accept_untag = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
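/* Translate the requested MTU into a max frame size by adding the
 * Ethernet header, FCS and one VLAN tag, validate it against the MAC
 * frame-size limits, and program it with the CONFIG_MAX_FRM_SIZE command.
 */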
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int max_frm_size;
	int ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(max_frm_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	hdev->mps = max_frm_size;

	return 0;
}
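/* TQP reset is a three-step handshake with the firmware: assert the reset
 * request (hclge_send_reset_tqp_cmd() with enable true), poll
 * hclge_get_reset_status() until the queue reports ready, then deassert
 * the request. hclge_reset_tqp() below drives the whole sequence.
 */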
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
				   u32 *flowctrl_adv)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	*flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
			 (phydev->advertising & ADVERTISED_Asym_Pause);
}
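/* Map rx_en/tx_en onto the 802.3 pause advertisement bits: rx_en sets
 * Pause | Asym_Pause and tx_en then toggles Asym_Pause, so RX+TX
 * advertises Pause, RX-only advertises Pause | Asym_Pause, and TX-only
 * advertises Asym_Pause alone.
 */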
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (rx_en)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	if (tx_en)
		phydev->advertising ^= ADVERTISED_Asym_Pause;
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
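/* Resolve the autonegotiated pause result: convert the local and link
 * partner advertisements into MII register form and let
 * mii_resolve_flowctrl_fdx() apply the 802.3 resolution rules; on a
 * half-duplex link flow control is forced off in both directions.
 */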
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	if (phydev->advertising & ADVERTISED_Pause)
		local_advertising = ADVERTISE_PAUSE_CAP;

	if (phydev->advertising & ADVERTISED_Asym_Pause)
		local_advertising |= ADVERTISE_PAUSE_ASYM;

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
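/* Read MDI/MDI-X control and status from the PHY. The relevant registers
 * sit on a separate PHY page, so the page register is switched to the
 * MDIX page for the reads and restored to the copper page afterwards.
 */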
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
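/* Bind an upper-layer client (KNIC/UNIC netdev client or RoCE client) to
 * every vport. A RoCE instance is only brought up once both a nic client
 * and a roce client are registered and the device supports RoCE.
 */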
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}
		}
	}

	return 0;
err:
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
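/* Bring up the PCI function: enable the device, prefer a 64-bit DMA mask
 * with a warned fallback to 32 bits, claim the regions, and map BAR 2,
 * which holds the device's configuration register space.
 */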
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);

	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto err_hclge_dev;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_request = 0;
	hdev->reset_pending = 0;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		return ret;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);
err_hclge_dev:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *free_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 temp_tqps = 0;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		if (!hdev->htqp[i].alloced)
			temp_tqps++;
	}
	*free_tqps = temp_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
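/* Runtime channel change: release the old TQPs, rebuild the knic with the
 * new queue count, remap TQPs to the vport and re-init the TM scheduler,
 * then rewrite the RSS TC mode (TC sizes rounded up to a power of two)
 * and respread the RSS indirection table over the new rss_size.
 */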
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);