/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/acpi.h>
11 #include <linux/device.h>
12 #include <linux/etherdevice.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
21 #include "hclge_cmd.h"
22 #include "hclge_main.h"
23 #include "hclge_mdio.h"
#define HCLGE_NAME			"hclge"
/* Read a u64 statistic stored at byte @offset inside the stats struct @p */
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
/* Byte offsets of named fields inside the per-category stats structures,
 * used by the g_*_stats_string tables below together with HCLGE_STATS_READ.
 */
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
33 static int hclge_rss_init_hw(struct hclge_dev
*hdev
);
34 static int hclge_set_mta_filter_mode(struct hclge_dev
*hdev
,
35 enum hclge_mta_dmac_sel_type mta_mac_sel
,
37 static int hclge_init_vlan_config(struct hclge_dev
*hdev
);
39 static struct hnae3_ae_algo ae_algo
;
41 static const struct pci_device_id ae_algo_pci_tbl
[] = {
42 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_GE
), 0},
43 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE
), 0},
44 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE_RDMA
), 0},
45 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE_RDMA_MACSEC
), 0},
46 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_50GE_RDMA
), 0},
47 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_50GE_RDMA_MACSEC
), 0},
48 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_RDMA_MACSEC
), 0},
49 /* Required last entry */
53 static const struct pci_device_id roce_pci_tbl
[] = {
54 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE_RDMA
), 0},
55 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE_RDMA_MACSEC
), 0},
56 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_50GE_RDMA
), 0},
57 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_50GE_RDMA_MACSEC
), 0},
58 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_RDMA_MACSEC
), 0},
59 /* Required last entry */
63 static const char hns3_nic_test_strs
[][ETH_GSTRING_LEN
] = {
65 "Serdes Loopback test",
69 static const struct hclge_comm_stats_str g_all_64bit_stats_string
[] = {
70 {"igu_rx_oversize_pkt",
71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt
)},
72 {"igu_rx_undersize_pkt",
73 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt
)},
74 {"igu_rx_out_all_pkt",
75 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt
)},
77 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt
)},
79 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt
)},
81 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt
)},
82 {"egu_tx_out_all_pkt",
83 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt
)},
85 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt
)},
87 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt
)},
89 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt
)},
90 {"ssu_ppp_mac_key_num",
91 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num
)},
92 {"ssu_ppp_host_key_num",
93 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num
)},
94 {"ppp_ssu_mac_rlt_num",
95 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num
)},
96 {"ppp_ssu_host_rlt_num",
97 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num
)},
99 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num
)},
101 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num
)},
103 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num
)},
105 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num
)}
108 static const struct hclge_comm_stats_str g_all_32bit_stats_string
[] = {
110 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt
)},
111 {"igu_rx_no_eof_pkt",
112 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt
)},
113 {"igu_rx_no_sof_pkt",
114 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt
)},
116 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt
)},
117 {"ssu_full_drop_num",
118 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num
)},
119 {"ssu_part_drop_num",
120 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num
)},
122 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num
)},
124 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num
)},
126 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num
)},
128 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt
)},
130 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt
)},
132 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt
)},
133 {"qcn_fb_invaild_cnt",
134 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt
)},
135 {"rx_packet_tc0_in_cnt",
136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt
)},
137 {"rx_packet_tc1_in_cnt",
138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt
)},
139 {"rx_packet_tc2_in_cnt",
140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt
)},
141 {"rx_packet_tc3_in_cnt",
142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt
)},
143 {"rx_packet_tc4_in_cnt",
144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt
)},
145 {"rx_packet_tc5_in_cnt",
146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt
)},
147 {"rx_packet_tc6_in_cnt",
148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt
)},
149 {"rx_packet_tc7_in_cnt",
150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt
)},
151 {"rx_packet_tc0_out_cnt",
152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt
)},
153 {"rx_packet_tc1_out_cnt",
154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt
)},
155 {"rx_packet_tc2_out_cnt",
156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt
)},
157 {"rx_packet_tc3_out_cnt",
158 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt
)},
159 {"rx_packet_tc4_out_cnt",
160 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt
)},
161 {"rx_packet_tc5_out_cnt",
162 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt
)},
163 {"rx_packet_tc6_out_cnt",
164 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt
)},
165 {"rx_packet_tc7_out_cnt",
166 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt
)},
167 {"tx_packet_tc0_in_cnt",
168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt
)},
169 {"tx_packet_tc1_in_cnt",
170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt
)},
171 {"tx_packet_tc2_in_cnt",
172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt
)},
173 {"tx_packet_tc3_in_cnt",
174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt
)},
175 {"tx_packet_tc4_in_cnt",
176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt
)},
177 {"tx_packet_tc5_in_cnt",
178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt
)},
179 {"tx_packet_tc6_in_cnt",
180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt
)},
181 {"tx_packet_tc7_in_cnt",
182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt
)},
183 {"tx_packet_tc0_out_cnt",
184 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt
)},
185 {"tx_packet_tc1_out_cnt",
186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt
)},
187 {"tx_packet_tc2_out_cnt",
188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt
)},
189 {"tx_packet_tc3_out_cnt",
190 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt
)},
191 {"tx_packet_tc4_out_cnt",
192 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt
)},
193 {"tx_packet_tc5_out_cnt",
194 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt
)},
195 {"tx_packet_tc6_out_cnt",
196 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt
)},
197 {"tx_packet_tc7_out_cnt",
198 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt
)},
199 {"pkt_curr_buf_tc0_cnt",
200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt
)},
201 {"pkt_curr_buf_tc1_cnt",
202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt
)},
203 {"pkt_curr_buf_tc2_cnt",
204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt
)},
205 {"pkt_curr_buf_tc3_cnt",
206 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt
)},
207 {"pkt_curr_buf_tc4_cnt",
208 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt
)},
209 {"pkt_curr_buf_tc5_cnt",
210 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt
)},
211 {"pkt_curr_buf_tc6_cnt",
212 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt
)},
213 {"pkt_curr_buf_tc7_cnt",
214 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt
)},
216 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num
)},
217 {"lo_pri_unicast_rlt_drop_num",
218 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num
)},
219 {"hi_pri_multicast_rlt_drop_num",
220 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num
)},
221 {"lo_pri_multicast_rlt_drop_num",
222 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num
)},
223 {"rx_oq_drop_pkt_cnt",
224 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt
)},
225 {"tx_oq_drop_pkt_cnt",
226 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt
)},
227 {"nic_l2_err_drop_pkt_cnt",
228 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt
)},
229 {"roc_l2_err_drop_pkt_cnt",
230 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt
)}
233 static const struct hclge_comm_stats_str g_mac_stats_string
[] = {
234 {"mac_tx_mac_pause_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num
)},
236 {"mac_rx_mac_pause_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num
)},
238 {"mac_tx_pfc_pri0_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num
)},
240 {"mac_tx_pfc_pri1_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num
)},
242 {"mac_tx_pfc_pri2_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num
)},
244 {"mac_tx_pfc_pri3_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num
)},
246 {"mac_tx_pfc_pri4_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num
)},
248 {"mac_tx_pfc_pri5_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num
)},
250 {"mac_tx_pfc_pri6_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num
)},
252 {"mac_tx_pfc_pri7_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num
)},
254 {"mac_rx_pfc_pri0_pkt_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num
)},
256 {"mac_rx_pfc_pri1_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num
)},
258 {"mac_rx_pfc_pri2_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num
)},
260 {"mac_rx_pfc_pri3_pkt_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num
)},
262 {"mac_rx_pfc_pri4_pkt_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num
)},
264 {"mac_rx_pfc_pri5_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num
)},
266 {"mac_rx_pfc_pri6_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num
)},
268 {"mac_rx_pfc_pri7_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num
)},
270 {"mac_tx_total_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num
)},
272 {"mac_tx_total_oct_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num
)},
274 {"mac_tx_good_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num
)},
276 {"mac_tx_bad_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num
)},
278 {"mac_tx_good_oct_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num
)},
280 {"mac_tx_bad_oct_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num
)},
282 {"mac_tx_uni_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num
)},
284 {"mac_tx_multi_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num
)},
286 {"mac_tx_broad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num
)},
288 {"mac_tx_undersize_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num
)},
290 {"mac_tx_overrsize_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num
)},
292 {"mac_tx_64_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num
)},
294 {"mac_tx_65_127_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num
)},
296 {"mac_tx_128_255_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num
)},
298 {"mac_tx_256_511_oct_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num
)},
300 {"mac_tx_512_1023_oct_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num
)},
302 {"mac_tx_1024_1518_oct_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num
)},
304 {"mac_tx_1519_max_oct_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num
)},
306 {"mac_rx_total_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num
)},
308 {"mac_rx_total_oct_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num
)},
310 {"mac_rx_good_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num
)},
312 {"mac_rx_bad_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num
)},
314 {"mac_rx_good_oct_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num
)},
316 {"mac_rx_bad_oct_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num
)},
318 {"mac_rx_uni_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num
)},
320 {"mac_rx_multi_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num
)},
322 {"mac_rx_broad_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num
)},
324 {"mac_rx_undersize_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num
)},
326 {"mac_rx_overrsize_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num
)},
328 {"mac_rx_64_oct_pkt_num",
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num
)},
330 {"mac_rx_65_127_oct_pkt_num",
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num
)},
332 {"mac_rx_128_255_oct_pkt_num",
333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num
)},
334 {"mac_rx_256_511_oct_pkt_num",
335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num
)},
336 {"mac_rx_512_1023_oct_pkt_num",
337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num
)},
338 {"mac_rx_1024_1518_oct_pkt_num",
339 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num
)},
340 {"mac_rx_1519_max_oct_pkt_num",
341 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num
)},
343 {"mac_trans_fragment_pkt_num",
344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num
)},
345 {"mac_trans_undermin_pkt_num",
346 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num
)},
347 {"mac_trans_jabber_pkt_num",
348 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num
)},
349 {"mac_trans_err_all_pkt_num",
350 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num
)},
351 {"mac_trans_from_app_good_pkt_num",
352 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num
)},
353 {"mac_trans_from_app_bad_pkt_num",
354 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num
)},
355 {"mac_rcv_fragment_pkt_num",
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num
)},
357 {"mac_rcv_undermin_pkt_num",
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num
)},
359 {"mac_rcv_jabber_pkt_num",
360 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num
)},
361 {"mac_rcv_fcs_err_pkt_num",
362 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num
)},
363 {"mac_rcv_send_app_good_pkt_num",
364 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num
)},
365 {"mac_rcv_send_app_bad_pkt_num",
366 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num
)}
369 static int hclge_64_bit_update_stats(struct hclge_dev
*hdev
)
371 #define HCLGE_64_BIT_CMD_NUM 5
372 #define HCLGE_64_BIT_RTN_DATANUM 4
373 u64
*data
= (u64
*)(&hdev
->hw_stats
.all_64_bit_stats
);
374 struct hclge_desc desc
[HCLGE_64_BIT_CMD_NUM
];
379 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_STATS_64_BIT
, true);
380 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_64_BIT_CMD_NUM
);
382 dev_err(&hdev
->pdev
->dev
,
383 "Get 64 bit pkt stats fail, status = %d.\n", ret
);
387 for (i
= 0; i
< HCLGE_64_BIT_CMD_NUM
; i
++) {
388 if (unlikely(i
== 0)) {
389 desc_data
= (u64
*)(&desc
[i
].data
[0]);
390 n
= HCLGE_64_BIT_RTN_DATANUM
- 1;
392 desc_data
= (u64
*)(&desc
[i
]);
393 n
= HCLGE_64_BIT_RTN_DATANUM
;
395 for (k
= 0; k
< n
; k
++) {
396 *data
++ += cpu_to_le64(*desc_data
);
404 static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats
*stats
)
406 stats
->pkt_curr_buf_cnt
= 0;
407 stats
->pkt_curr_buf_tc0_cnt
= 0;
408 stats
->pkt_curr_buf_tc1_cnt
= 0;
409 stats
->pkt_curr_buf_tc2_cnt
= 0;
410 stats
->pkt_curr_buf_tc3_cnt
= 0;
411 stats
->pkt_curr_buf_tc4_cnt
= 0;
412 stats
->pkt_curr_buf_tc5_cnt
= 0;
413 stats
->pkt_curr_buf_tc6_cnt
= 0;
414 stats
->pkt_curr_buf_tc7_cnt
= 0;
417 static int hclge_32_bit_update_stats(struct hclge_dev
*hdev
)
419 #define HCLGE_32_BIT_CMD_NUM 8
420 #define HCLGE_32_BIT_RTN_DATANUM 8
422 struct hclge_desc desc
[HCLGE_32_BIT_CMD_NUM
];
423 struct hclge_32_bit_stats
*all_32_bit_stats
;
429 all_32_bit_stats
= &hdev
->hw_stats
.all_32_bit_stats
;
430 data
= (u64
*)(&all_32_bit_stats
->egu_tx_1588_pkt
);
432 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_STATS_32_BIT
, true);
433 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_32_BIT_CMD_NUM
);
435 dev_err(&hdev
->pdev
->dev
,
436 "Get 32 bit pkt stats fail, status = %d.\n", ret
);
441 hclge_reset_partial_32bit_counter(all_32_bit_stats
);
442 for (i
= 0; i
< HCLGE_32_BIT_CMD_NUM
; i
++) {
443 if (unlikely(i
== 0)) {
444 all_32_bit_stats
->igu_rx_err_pkt
+=
445 cpu_to_le32(desc
[i
].data
[0]);
446 all_32_bit_stats
->igu_rx_no_eof_pkt
+=
447 cpu_to_le32(desc
[i
].data
[1] & 0xffff);
448 all_32_bit_stats
->igu_rx_no_sof_pkt
+=
449 cpu_to_le32((desc
[i
].data
[1] >> 16) & 0xffff);
451 desc_data
= (u32
*)(&desc
[i
].data
[2]);
452 n
= HCLGE_32_BIT_RTN_DATANUM
- 4;
454 desc_data
= (u32
*)(&desc
[i
]);
455 n
= HCLGE_32_BIT_RTN_DATANUM
;
457 for (k
= 0; k
< n
; k
++) {
458 *data
++ += cpu_to_le32(*desc_data
);
466 static int hclge_mac_update_stats(struct hclge_dev
*hdev
)
468 #define HCLGE_MAC_CMD_NUM 17
469 #define HCLGE_RTN_DATA_NUM 4
471 u64
*data
= (u64
*)(&hdev
->hw_stats
.mac_stats
);
472 struct hclge_desc desc
[HCLGE_MAC_CMD_NUM
];
477 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_STATS_MAC
, true);
478 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_MAC_CMD_NUM
);
480 dev_err(&hdev
->pdev
->dev
,
481 "Get MAC pkt stats fail, status = %d.\n", ret
);
486 for (i
= 0; i
< HCLGE_MAC_CMD_NUM
; i
++) {
487 if (unlikely(i
== 0)) {
488 desc_data
= (u64
*)(&desc
[i
].data
[0]);
489 n
= HCLGE_RTN_DATA_NUM
- 2;
491 desc_data
= (u64
*)(&desc
[i
]);
492 n
= HCLGE_RTN_DATA_NUM
;
494 for (k
= 0; k
< n
; k
++) {
495 *data
++ += cpu_to_le64(*desc_data
);
503 static int hclge_tqps_update_stats(struct hnae3_handle
*handle
)
505 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
506 struct hclge_vport
*vport
= hclge_get_vport(handle
);
507 struct hclge_dev
*hdev
= vport
->back
;
508 struct hnae3_queue
*queue
;
509 struct hclge_desc desc
[1];
510 struct hclge_tqp
*tqp
;
513 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
514 queue
= handle
->kinfo
.tqp
[i
];
515 tqp
= container_of(queue
, struct hclge_tqp
, q
);
516 /* command : HCLGE_OPC_QUERY_IGU_STAT */
517 hclge_cmd_setup_basic_desc(&desc
[0],
518 HCLGE_OPC_QUERY_RX_STATUS
,
521 desc
[0].data
[0] = (tqp
->index
& 0x1ff);
522 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 1);
524 dev_err(&hdev
->pdev
->dev
,
525 "Query tqp stat fail, status = %d,queue = %d\n",
529 tqp
->tqp_stats
.rcb_rx_ring_pktnum_rcd
+=
530 cpu_to_le32(desc
[0].data
[4]);
533 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
534 queue
= handle
->kinfo
.tqp
[i
];
535 tqp
= container_of(queue
, struct hclge_tqp
, q
);
536 /* command : HCLGE_OPC_QUERY_IGU_STAT */
537 hclge_cmd_setup_basic_desc(&desc
[0],
538 HCLGE_OPC_QUERY_TX_STATUS
,
541 desc
[0].data
[0] = (tqp
->index
& 0x1ff);
542 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 1);
544 dev_err(&hdev
->pdev
->dev
,
545 "Query tqp stat fail, status = %d,queue = %d\n",
549 tqp
->tqp_stats
.rcb_tx_ring_pktnum_rcd
+=
550 cpu_to_le32(desc
[0].data
[4]);
556 static u64
*hclge_tqps_get_stats(struct hnae3_handle
*handle
, u64
*data
)
558 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
559 struct hclge_tqp
*tqp
;
563 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
564 tqp
= container_of(kinfo
->tqp
[i
], struct hclge_tqp
, q
);
565 *buff
++ = cpu_to_le64(tqp
->tqp_stats
.rcb_tx_ring_pktnum_rcd
);
568 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
569 tqp
= container_of(kinfo
->tqp
[i
], struct hclge_tqp
, q
);
570 *buff
++ = cpu_to_le64(tqp
->tqp_stats
.rcb_rx_ring_pktnum_rcd
);
576 static int hclge_tqps_get_sset_count(struct hnae3_handle
*handle
, int stringset
)
578 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
580 return kinfo
->num_tqps
* (2);
583 static u8
*hclge_tqps_get_strings(struct hnae3_handle
*handle
, u8
*data
)
585 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
589 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
590 struct hclge_tqp
*tqp
= container_of(handle
->kinfo
.tqp
[i
],
591 struct hclge_tqp
, q
);
592 snprintf(buff
, ETH_GSTRING_LEN
, "rcb_q%d_tx_pktnum_rcd",
594 buff
= buff
+ ETH_GSTRING_LEN
;
597 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
598 struct hclge_tqp
*tqp
= container_of(kinfo
->tqp
[i
],
599 struct hclge_tqp
, q
);
600 snprintf(buff
, ETH_GSTRING_LEN
, "rcb_q%d_rx_pktnum_rcd",
602 buff
= buff
+ ETH_GSTRING_LEN
;
608 static u64
*hclge_comm_get_stats(void *comm_stats
,
609 const struct hclge_comm_stats_str strs
[],
615 for (i
= 0; i
< size
; i
++)
616 buf
[i
] = HCLGE_STATS_READ(comm_stats
, strs
[i
].offset
);
621 static u8
*hclge_comm_get_strings(u32 stringset
,
622 const struct hclge_comm_stats_str strs
[],
625 char *buff
= (char *)data
;
628 if (stringset
!= ETH_SS_STATS
)
631 for (i
= 0; i
< size
; i
++) {
632 snprintf(buff
, ETH_GSTRING_LEN
,
634 buff
= buff
+ ETH_GSTRING_LEN
;
640 static void hclge_update_netstat(struct hclge_hw_stats
*hw_stats
,
641 struct net_device_stats
*net_stats
)
643 net_stats
->tx_dropped
= 0;
644 net_stats
->rx_dropped
= hw_stats
->all_32_bit_stats
.ssu_full_drop_num
;
645 net_stats
->rx_dropped
+= hw_stats
->all_32_bit_stats
.ppp_key_drop_num
;
646 net_stats
->rx_dropped
+= hw_stats
->all_32_bit_stats
.ssu_key_drop_num
;
648 net_stats
->rx_errors
= hw_stats
->mac_stats
.mac_rx_overrsize_pkt_num
;
649 net_stats
->rx_errors
+= hw_stats
->mac_stats
.mac_rx_undersize_pkt_num
;
650 net_stats
->rx_errors
+= hw_stats
->all_32_bit_stats
.igu_rx_err_pkt
;
651 net_stats
->rx_errors
+= hw_stats
->all_32_bit_stats
.igu_rx_no_eof_pkt
;
652 net_stats
->rx_errors
+= hw_stats
->all_32_bit_stats
.igu_rx_no_sof_pkt
;
653 net_stats
->rx_errors
+= hw_stats
->mac_stats
.mac_rcv_fcs_err_pkt_num
;
655 net_stats
->multicast
= hw_stats
->mac_stats
.mac_tx_multi_pkt_num
;
656 net_stats
->multicast
+= hw_stats
->mac_stats
.mac_rx_multi_pkt_num
;
658 net_stats
->rx_crc_errors
= hw_stats
->mac_stats
.mac_rcv_fcs_err_pkt_num
;
659 net_stats
->rx_length_errors
=
660 hw_stats
->mac_stats
.mac_rx_undersize_pkt_num
;
661 net_stats
->rx_length_errors
+=
662 hw_stats
->mac_stats
.mac_rx_overrsize_pkt_num
;
663 net_stats
->rx_over_errors
=
664 hw_stats
->mac_stats
.mac_rx_overrsize_pkt_num
;
667 static void hclge_update_stats_for_all(struct hclge_dev
*hdev
)
669 struct hnae3_handle
*handle
;
672 handle
= &hdev
->vport
[0].nic
;
673 if (handle
->client
) {
674 status
= hclge_tqps_update_stats(handle
);
676 dev_err(&hdev
->pdev
->dev
,
677 "Update TQPS stats fail, status = %d.\n",
682 status
= hclge_mac_update_stats(hdev
);
684 dev_err(&hdev
->pdev
->dev
,
685 "Update MAC stats fail, status = %d.\n", status
);
687 status
= hclge_32_bit_update_stats(hdev
);
689 dev_err(&hdev
->pdev
->dev
,
690 "Update 32 bit stats fail, status = %d.\n",
693 hclge_update_netstat(&hdev
->hw_stats
, &handle
->kinfo
.netdev
->stats
);
696 static void hclge_update_stats(struct hnae3_handle
*handle
,
697 struct net_device_stats
*net_stats
)
699 struct hclge_vport
*vport
= hclge_get_vport(handle
);
700 struct hclge_dev
*hdev
= vport
->back
;
701 struct hclge_hw_stats
*hw_stats
= &hdev
->hw_stats
;
704 status
= hclge_mac_update_stats(hdev
);
706 dev_err(&hdev
->pdev
->dev
,
707 "Update MAC stats fail, status = %d.\n",
710 status
= hclge_32_bit_update_stats(hdev
);
712 dev_err(&hdev
->pdev
->dev
,
713 "Update 32 bit stats fail, status = %d.\n",
716 status
= hclge_64_bit_update_stats(hdev
);
718 dev_err(&hdev
->pdev
->dev
,
719 "Update 64 bit stats fail, status = %d.\n",
722 status
= hclge_tqps_update_stats(handle
);
724 dev_err(&hdev
->pdev
->dev
,
725 "Update TQPS stats fail, status = %d.\n",
728 hclge_update_netstat(hw_stats
, net_stats
);
731 static int hclge_get_sset_count(struct hnae3_handle
*handle
, int stringset
)
733 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7
735 struct hclge_vport
*vport
= hclge_get_vport(handle
);
736 struct hclge_dev
*hdev
= vport
->back
;
739 /* Loopback test support rules:
740 * mac: only GE mode support
741 * serdes: all mac mode will support include GE/XGE/LGE/CGE
742 * phy: only support when phy device exist on board
744 if (stringset
== ETH_SS_TEST
) {
745 /* clear loopback bit flags at first */
746 handle
->flags
= (handle
->flags
& (~HCLGE_LOOPBACK_TEST_FLAGS
));
747 if (hdev
->hw
.mac
.speed
== HCLGE_MAC_SPEED_10M
||
748 hdev
->hw
.mac
.speed
== HCLGE_MAC_SPEED_100M
||
749 hdev
->hw
.mac
.speed
== HCLGE_MAC_SPEED_1G
) {
751 handle
->flags
|= HNAE3_SUPPORT_MAC_LOOPBACK
;
755 } else if (stringset
== ETH_SS_STATS
) {
756 count
= ARRAY_SIZE(g_mac_stats_string
) +
757 ARRAY_SIZE(g_all_32bit_stats_string
) +
758 ARRAY_SIZE(g_all_64bit_stats_string
) +
759 hclge_tqps_get_sset_count(handle
, stringset
);
765 static void hclge_get_strings(struct hnae3_handle
*handle
,
769 u8
*p
= (char *)data
;
772 if (stringset
== ETH_SS_STATS
) {
773 size
= ARRAY_SIZE(g_mac_stats_string
);
774 p
= hclge_comm_get_strings(stringset
,
778 size
= ARRAY_SIZE(g_all_32bit_stats_string
);
779 p
= hclge_comm_get_strings(stringset
,
780 g_all_32bit_stats_string
,
783 size
= ARRAY_SIZE(g_all_64bit_stats_string
);
784 p
= hclge_comm_get_strings(stringset
,
785 g_all_64bit_stats_string
,
788 p
= hclge_tqps_get_strings(handle
, p
);
789 } else if (stringset
== ETH_SS_TEST
) {
790 if (handle
->flags
& HNAE3_SUPPORT_MAC_LOOPBACK
) {
792 hns3_nic_test_strs
[HNAE3_MAC_INTER_LOOP_MAC
],
794 p
+= ETH_GSTRING_LEN
;
796 if (handle
->flags
& HNAE3_SUPPORT_SERDES_LOOPBACK
) {
798 hns3_nic_test_strs
[HNAE3_MAC_INTER_LOOP_SERDES
],
800 p
+= ETH_GSTRING_LEN
;
802 if (handle
->flags
& HNAE3_SUPPORT_PHY_LOOPBACK
) {
804 hns3_nic_test_strs
[HNAE3_MAC_INTER_LOOP_PHY
],
806 p
+= ETH_GSTRING_LEN
;
811 static void hclge_get_stats(struct hnae3_handle
*handle
, u64
*data
)
813 struct hclge_vport
*vport
= hclge_get_vport(handle
);
814 struct hclge_dev
*hdev
= vport
->back
;
817 p
= hclge_comm_get_stats(&hdev
->hw_stats
.mac_stats
,
819 ARRAY_SIZE(g_mac_stats_string
),
821 p
= hclge_comm_get_stats(&hdev
->hw_stats
.all_32_bit_stats
,
822 g_all_32bit_stats_string
,
823 ARRAY_SIZE(g_all_32bit_stats_string
),
825 p
= hclge_comm_get_stats(&hdev
->hw_stats
.all_64_bit_stats
,
826 g_all_64bit_stats_string
,
827 ARRAY_SIZE(g_all_64bit_stats_string
),
829 p
= hclge_tqps_get_stats(handle
, p
);
832 static int hclge_parse_func_status(struct hclge_dev
*hdev
,
833 struct hclge_func_status
*status
)
835 if (!(status
->pf_state
& HCLGE_PF_STATE_DONE
))
838 /* Set the pf to main pf */
839 if (status
->pf_state
& HCLGE_PF_STATE_MAIN
)
840 hdev
->flag
|= HCLGE_FLAG_MAIN
;
842 hdev
->flag
&= ~HCLGE_FLAG_MAIN
;
844 hdev
->num_req_vfs
= status
->vf_num
/ status
->pf_num
;
848 static int hclge_query_function_status(struct hclge_dev
*hdev
)
850 struct hclge_func_status
*req
;
851 struct hclge_desc desc
;
855 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_FUNC_STATUS
, true);
856 req
= (struct hclge_func_status
*)desc
.data
;
859 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
861 dev_err(&hdev
->pdev
->dev
,
862 "query function status failed %d.\n",
868 /* Check pf reset is done */
871 usleep_range(1000, 2000);
872 } while (timeout
++ < 5);
874 ret
= hclge_parse_func_status(hdev
, req
);
879 static int hclge_query_pf_resource(struct hclge_dev
*hdev
)
881 struct hclge_pf_res
*req
;
882 struct hclge_desc desc
;
885 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_PF_RSRC
, true);
886 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
888 dev_err(&hdev
->pdev
->dev
,
889 "query pf resource failed %d.\n", ret
);
893 req
= (struct hclge_pf_res
*)desc
.data
;
894 hdev
->num_tqps
= __le16_to_cpu(req
->tqp_num
);
895 hdev
->pkt_buf_size
= __le16_to_cpu(req
->buf_size
) << HCLGE_BUF_UNIT_S
;
897 if (hnae_get_bit(hdev
->ae_dev
->flag
, HNAE_DEV_SUPPORT_ROCE_B
)) {
898 hdev
->num_roce_msix
=
899 hnae_get_field(__le16_to_cpu(req
->pf_intr_vector_number
),
900 HCLGE_PF_VEC_NUM_M
, HCLGE_PF_VEC_NUM_S
);
902 /* PF should have NIC vectors and Roce vectors,
903 * NIC vectors are queued before Roce vectors.
905 hdev
->num_msi
= hdev
->num_roce_msix
+ HCLGE_ROCE_VECTOR_OFFSET
;
908 hnae_get_field(__le16_to_cpu(req
->pf_intr_vector_number
),
909 HCLGE_PF_VEC_NUM_M
, HCLGE_PF_VEC_NUM_S
);
915 static int hclge_parse_speed(int speed_cmd
, int *speed
)
919 *speed
= HCLGE_MAC_SPEED_10M
;
922 *speed
= HCLGE_MAC_SPEED_100M
;
925 *speed
= HCLGE_MAC_SPEED_1G
;
928 *speed
= HCLGE_MAC_SPEED_10G
;
931 *speed
= HCLGE_MAC_SPEED_25G
;
934 *speed
= HCLGE_MAC_SPEED_40G
;
937 *speed
= HCLGE_MAC_SPEED_50G
;
940 *speed
= HCLGE_MAC_SPEED_100G
;
949 static void hclge_parse_cfg(struct hclge_cfg
*cfg
, struct hclge_desc
*desc
)
951 struct hclge_cfg_param
*req
;
952 u64 mac_addr_tmp_high
;
956 req
= (struct hclge_cfg_param
*)desc
[0].data
;
958 /* get the configuration */
959 cfg
->vmdq_vport_num
= hnae_get_field(__le32_to_cpu(req
->param
[0]),
962 cfg
->tc_num
= hnae_get_field(__le32_to_cpu(req
->param
[0]),
963 HCLGE_CFG_TC_NUM_M
, HCLGE_CFG_TC_NUM_S
);
964 cfg
->tqp_desc_num
= hnae_get_field(__le32_to_cpu(req
->param
[0]),
965 HCLGE_CFG_TQP_DESC_N_M
,
966 HCLGE_CFG_TQP_DESC_N_S
);
968 cfg
->phy_addr
= hnae_get_field(__le32_to_cpu(req
->param
[1]),
969 HCLGE_CFG_PHY_ADDR_M
,
970 HCLGE_CFG_PHY_ADDR_S
);
971 cfg
->media_type
= hnae_get_field(__le32_to_cpu(req
->param
[1]),
972 HCLGE_CFG_MEDIA_TP_M
,
973 HCLGE_CFG_MEDIA_TP_S
);
974 cfg
->rx_buf_len
= hnae_get_field(__le32_to_cpu(req
->param
[1]),
975 HCLGE_CFG_RX_BUF_LEN_M
,
976 HCLGE_CFG_RX_BUF_LEN_S
);
977 /* get mac_address */
978 mac_addr_tmp
= __le32_to_cpu(req
->param
[2]);
979 mac_addr_tmp_high
= hnae_get_field(__le32_to_cpu(req
->param
[3]),
980 HCLGE_CFG_MAC_ADDR_H_M
,
981 HCLGE_CFG_MAC_ADDR_H_S
);
983 mac_addr_tmp
|= (mac_addr_tmp_high
<< 31) << 1;
985 cfg
->default_speed
= hnae_get_field(__le32_to_cpu(req
->param
[3]),
986 HCLGE_CFG_DEFAULT_SPEED_M
,
987 HCLGE_CFG_DEFAULT_SPEED_S
);
988 for (i
= 0; i
< ETH_ALEN
; i
++)
989 cfg
->mac_addr
[i
] = (mac_addr_tmp
>> (8 * i
)) & 0xff;
991 req
= (struct hclge_cfg_param
*)desc
[1].data
;
992 cfg
->numa_node_map
= __le32_to_cpu(req
->param
[0]);
995 /* hclge_get_cfg: query the static parameter from flash
996 * @hdev: pointer to struct hclge_dev
997 * @hcfg: the config structure to be getted
999 static int hclge_get_cfg(struct hclge_dev
*hdev
, struct hclge_cfg
*hcfg
)
1001 struct hclge_desc desc
[HCLGE_PF_CFG_DESC_NUM
];
1002 struct hclge_cfg_param
*req
;
1005 for (i
= 0; i
< HCLGE_PF_CFG_DESC_NUM
; i
++) {
1006 req
= (struct hclge_cfg_param
*)desc
[i
].data
;
1007 hclge_cmd_setup_basic_desc(&desc
[i
], HCLGE_OPC_GET_CFG_PARAM
,
1009 hnae_set_field(req
->offset
, HCLGE_CFG_OFFSET_M
,
1010 HCLGE_CFG_OFFSET_S
, i
* HCLGE_CFG_RD_LEN_BYTES
);
1011 /* Len should be united by 4 bytes when send to hardware */
1012 hnae_set_field(req
->offset
, HCLGE_CFG_RD_LEN_M
,
1014 HCLGE_CFG_RD_LEN_BYTES
/ HCLGE_CFG_RD_LEN_UNIT
);
1015 req
->offset
= cpu_to_le32(req
->offset
);
1018 ret
= hclge_cmd_send(&hdev
->hw
, desc
, HCLGE_PF_CFG_DESC_NUM
);
1020 dev_err(&hdev
->pdev
->dev
,
1021 "get config failed %d.\n", ret
);
1025 hclge_parse_cfg(hcfg
, desc
);
1029 static int hclge_get_cap(struct hclge_dev
*hdev
)
1033 ret
= hclge_query_function_status(hdev
);
1035 dev_err(&hdev
->pdev
->dev
,
1036 "query function status error %d.\n", ret
);
1040 /* get pf resource */
1041 ret
= hclge_query_pf_resource(hdev
);
1043 dev_err(&hdev
->pdev
->dev
,
1044 "query pf resource error %d.\n", ret
);
1051 static int hclge_configure(struct hclge_dev
*hdev
)
1053 struct hclge_cfg cfg
;
1056 ret
= hclge_get_cfg(hdev
, &cfg
);
1058 dev_err(&hdev
->pdev
->dev
, "get mac mode error %d.\n", ret
);
1062 hdev
->num_vmdq_vport
= cfg
.vmdq_vport_num
;
1063 hdev
->base_tqp_pid
= 0;
1064 hdev
->rss_size_max
= 1;
1065 hdev
->rx_buf_len
= cfg
.rx_buf_len
;
1066 for (i
= 0; i
< ETH_ALEN
; i
++)
1067 hdev
->hw
.mac
.mac_addr
[i
] = cfg
.mac_addr
[i
];
1068 hdev
->hw
.mac
.media_type
= cfg
.media_type
;
1069 hdev
->num_desc
= cfg
.tqp_desc_num
;
1070 hdev
->tm_info
.num_pg
= 1;
1071 hdev
->tm_info
.num_tc
= cfg
.tc_num
;
1072 hdev
->tm_info
.hw_pfc_map
= 0;
1074 ret
= hclge_parse_speed(cfg
.default_speed
, &hdev
->hw
.mac
.speed
);
1076 dev_err(&hdev
->pdev
->dev
, "Get wrong speed ret=%d.\n", ret
);
1080 if ((hdev
->tm_info
.num_tc
> HNAE3_MAX_TC
) ||
1081 (hdev
->tm_info
.num_tc
< 1)) {
1082 dev_warn(&hdev
->pdev
->dev
, "TC num = %d.\n",
1083 hdev
->tm_info
.num_tc
);
1084 hdev
->tm_info
.num_tc
= 1;
1087 /* Currently not support uncontiuous tc */
1088 for (i
= 0; i
< cfg
.tc_num
; i
++)
1089 hnae_set_bit(hdev
->hw_tc_map
, i
, 1);
1091 if (!hdev
->num_vmdq_vport
&& !hdev
->num_req_vfs
)
1092 hdev
->tx_sch_mode
= HCLGE_FLAG_TC_BASE_SCH_MODE
;
1094 hdev
->tx_sch_mode
= HCLGE_FLAG_VNET_BASE_SCH_MODE
;
1099 static int hclge_config_tso(struct hclge_dev
*hdev
, int tso_mss_min
,
1102 struct hclge_cfg_tso_status
*req
;
1103 struct hclge_desc desc
;
1105 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_TSO_GENERIC_CONFIG
, false);
1107 req
= (struct hclge_cfg_tso_status
*)desc
.data
;
1108 hnae_set_field(req
->tso_mss_min
, HCLGE_TSO_MSS_MIN_M
,
1109 HCLGE_TSO_MSS_MIN_S
, tso_mss_min
);
1110 hnae_set_field(req
->tso_mss_max
, HCLGE_TSO_MSS_MIN_M
,
1111 HCLGE_TSO_MSS_MIN_S
, tso_mss_max
);
1113 return hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1116 static int hclge_alloc_tqps(struct hclge_dev
*hdev
)
1118 struct hclge_tqp
*tqp
;
1121 hdev
->htqp
= devm_kcalloc(&hdev
->pdev
->dev
, hdev
->num_tqps
,
1122 sizeof(struct hclge_tqp
), GFP_KERNEL
);
1128 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
1129 tqp
->dev
= &hdev
->pdev
->dev
;
1132 tqp
->q
.ae_algo
= &ae_algo
;
1133 tqp
->q
.buf_size
= hdev
->rx_buf_len
;
1134 tqp
->q
.desc_num
= hdev
->num_desc
;
1135 tqp
->q
.io_base
= hdev
->hw
.io_base
+ HCLGE_TQP_REG_OFFSET
+
1136 i
* HCLGE_TQP_REG_SIZE
;
1144 static int hclge_map_tqps_to_func(struct hclge_dev
*hdev
, u16 func_id
,
1145 u16 tqp_pid
, u16 tqp_vid
, bool is_pf
)
1147 struct hclge_tqp_map
*req
;
1148 struct hclge_desc desc
;
1151 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_SET_TQP_MAP
, false);
1153 req
= (struct hclge_tqp_map
*)desc
.data
;
1154 req
->tqp_id
= cpu_to_le16(tqp_pid
);
1155 req
->tqp_vf
= cpu_to_le16(func_id
);
1156 req
->tqp_flag
= !is_pf
<< HCLGE_TQP_MAP_TYPE_B
|
1157 1 << HCLGE_TQP_MAP_EN_B
;
1158 req
->tqp_vid
= cpu_to_le16(tqp_vid
);
1160 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1162 dev_err(&hdev
->pdev
->dev
, "TQP map failed %d.\n",
1170 static int hclge_assign_tqp(struct hclge_vport
*vport
,
1171 struct hnae3_queue
**tqp
, u16 num_tqps
)
1173 struct hclge_dev
*hdev
= vport
->back
;
1174 int i
, alloced
, func_id
, ret
;
1177 func_id
= vport
->vport_id
;
1178 is_pf
= (vport
->vport_id
== 0) ? true : false;
1180 for (i
= 0, alloced
= 0; i
< hdev
->num_tqps
&&
1181 alloced
< num_tqps
; i
++) {
1182 if (!hdev
->htqp
[i
].alloced
) {
1183 hdev
->htqp
[i
].q
.handle
= &vport
->nic
;
1184 hdev
->htqp
[i
].q
.tqp_index
= alloced
;
1185 tqp
[alloced
] = &hdev
->htqp
[i
].q
;
1186 hdev
->htqp
[i
].alloced
= true;
1187 ret
= hclge_map_tqps_to_func(hdev
, func_id
,
1188 hdev
->htqp
[i
].index
,
1196 vport
->alloc_tqps
= num_tqps
;
1201 static int hclge_knic_setup(struct hclge_vport
*vport
, u16 num_tqps
)
1203 struct hnae3_handle
*nic
= &vport
->nic
;
1204 struct hnae3_knic_private_info
*kinfo
= &nic
->kinfo
;
1205 struct hclge_dev
*hdev
= vport
->back
;
1208 kinfo
->num_desc
= hdev
->num_desc
;
1209 kinfo
->rx_buf_len
= hdev
->rx_buf_len
;
1210 kinfo
->num_tc
= min_t(u16
, num_tqps
, hdev
->tm_info
.num_tc
);
1212 = min_t(u16
, hdev
->rss_size_max
, num_tqps
/ kinfo
->num_tc
);
1213 kinfo
->num_tqps
= kinfo
->rss_size
* kinfo
->num_tc
;
1215 for (i
= 0; i
< HNAE3_MAX_TC
; i
++) {
1216 if (hdev
->hw_tc_map
& BIT(i
)) {
1217 kinfo
->tc_info
[i
].enable
= true;
1218 kinfo
->tc_info
[i
].tqp_offset
= i
* kinfo
->rss_size
;
1219 kinfo
->tc_info
[i
].tqp_count
= kinfo
->rss_size
;
1220 kinfo
->tc_info
[i
].tc
= i
;
1222 /* Set to default queue if TC is disable */
1223 kinfo
->tc_info
[i
].enable
= false;
1224 kinfo
->tc_info
[i
].tqp_offset
= 0;
1225 kinfo
->tc_info
[i
].tqp_count
= 1;
1226 kinfo
->tc_info
[i
].tc
= 0;
1230 kinfo
->tqp
= devm_kcalloc(&hdev
->pdev
->dev
, kinfo
->num_tqps
,
1231 sizeof(struct hnae3_queue
*), GFP_KERNEL
);
1235 ret
= hclge_assign_tqp(vport
, kinfo
->tqp
, kinfo
->num_tqps
);
1237 dev_err(&hdev
->pdev
->dev
, "fail to assign TQPs %d.\n", ret
);
1244 static void hclge_unic_setup(struct hclge_vport
*vport
, u16 num_tqps
)
1246 /* this would be initialized later */
1249 static int hclge_vport_setup(struct hclge_vport
*vport
, u16 num_tqps
)
1251 struct hnae3_handle
*nic
= &vport
->nic
;
1252 struct hclge_dev
*hdev
= vport
->back
;
1255 nic
->pdev
= hdev
->pdev
;
1256 nic
->ae_algo
= &ae_algo
;
1257 nic
->numa_node_mask
= hdev
->numa_node_mask
;
1259 if (hdev
->ae_dev
->dev_type
== HNAE3_DEV_KNIC
) {
1260 ret
= hclge_knic_setup(vport
, num_tqps
);
1262 dev_err(&hdev
->pdev
->dev
, "knic setup failed %d\n",
1267 hclge_unic_setup(vport
, num_tqps
);
1273 static int hclge_alloc_vport(struct hclge_dev
*hdev
)
1275 struct pci_dev
*pdev
= hdev
->pdev
;
1276 struct hclge_vport
*vport
;
1282 /* We need to alloc a vport for main NIC of PF */
1283 num_vport
= hdev
->num_vmdq_vport
+ hdev
->num_req_vfs
+ 1;
1285 if (hdev
->num_tqps
< num_vport
)
1286 num_vport
= hdev
->num_tqps
;
1288 /* Alloc the same number of TQPs for every vport */
1289 tqp_per_vport
= hdev
->num_tqps
/ num_vport
;
1290 tqp_main_vport
= tqp_per_vport
+ hdev
->num_tqps
% num_vport
;
1292 vport
= devm_kcalloc(&pdev
->dev
, num_vport
, sizeof(struct hclge_vport
),
1297 hdev
->vport
= vport
;
1298 hdev
->num_alloc_vport
= num_vport
;
1300 #ifdef CONFIG_PCI_IOV
1302 if (hdev
->num_req_vfs
) {
1303 dev_info(&pdev
->dev
, "active VFs(%d) found, enabling SRIOV\n",
1305 ret
= pci_enable_sriov(hdev
->pdev
, hdev
->num_req_vfs
);
1307 hdev
->num_alloc_vfs
= 0;
1308 dev_err(&pdev
->dev
, "SRIOV enable failed %d\n",
1313 hdev
->num_alloc_vfs
= hdev
->num_req_vfs
;
1316 for (i
= 0; i
< num_vport
; i
++) {
1318 vport
->vport_id
= i
;
1321 ret
= hclge_vport_setup(vport
, tqp_main_vport
);
1323 ret
= hclge_vport_setup(vport
, tqp_per_vport
);
1326 "vport setup failed for vport %d, %d\n",
1337 static int hclge_cmd_alloc_tx_buff(struct hclge_dev
*hdev
, u16 buf_size
)
1339 /* TX buffer size is unit by 128 byte */
1340 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1341 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1342 struct hclge_tx_buff_alloc
*req
;
1343 struct hclge_desc desc
;
1347 req
= (struct hclge_tx_buff_alloc
*)desc
.data
;
1349 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_TX_BUFF_ALLOC
, 0);
1350 for (i
= 0; i
< HCLGE_TC_NUM
; i
++)
1351 req
->tx_pkt_buff
[i
] =
1352 cpu_to_le16((buf_size
>> HCLGE_BUF_SIZE_UNIT_SHIFT
) |
1353 HCLGE_BUF_SIZE_UPDATE_EN_MSK
);
1355 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1357 dev_err(&hdev
->pdev
->dev
, "tx buffer alloc cmd failed %d.\n",
1365 static int hclge_tx_buffer_alloc(struct hclge_dev
*hdev
, u32 buf_size
)
1367 int ret
= hclge_cmd_alloc_tx_buff(hdev
, buf_size
);
1370 dev_err(&hdev
->pdev
->dev
,
1371 "tx buffer alloc failed %d\n", ret
);
1378 static int hclge_get_tc_num(struct hclge_dev
*hdev
)
1382 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++)
1383 if (hdev
->hw_tc_map
& BIT(i
))
1388 static int hclge_get_pfc_enalbe_num(struct hclge_dev
*hdev
)
1392 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++)
1393 if (hdev
->hw_tc_map
& BIT(i
) &&
1394 hdev
->tm_info
.hw_pfc_map
& BIT(i
))
1399 /* Get the number of pfc enabled TCs, which have private buffer */
1400 static int hclge_get_pfc_priv_num(struct hclge_dev
*hdev
)
1402 struct hclge_priv_buf
*priv
;
1405 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1406 priv
= &hdev
->priv_buf
[i
];
1407 if ((hdev
->tm_info
.hw_pfc_map
& BIT(i
)) &&
1415 /* Get the number of pfc disabled TCs, which have private buffer */
1416 static int hclge_get_no_pfc_priv_num(struct hclge_dev
*hdev
)
1418 struct hclge_priv_buf
*priv
;
1421 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1422 priv
= &hdev
->priv_buf
[i
];
1423 if (hdev
->hw_tc_map
& BIT(i
) &&
1424 !(hdev
->tm_info
.hw_pfc_map
& BIT(i
)) &&
1432 static u32
hclge_get_rx_priv_buff_alloced(struct hclge_dev
*hdev
)
1434 struct hclge_priv_buf
*priv
;
1438 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1439 priv
= &hdev
->priv_buf
[i
];
1441 rx_priv
+= priv
->buf_size
;
1446 static bool hclge_is_rx_buf_ok(struct hclge_dev
*hdev
, u32 rx_all
)
1448 u32 shared_buf_min
, shared_buf_tc
, shared_std
;
1449 int tc_num
, pfc_enable_num
;
1454 tc_num
= hclge_get_tc_num(hdev
);
1455 pfc_enable_num
= hclge_get_pfc_enalbe_num(hdev
);
1457 shared_buf_min
= 2 * hdev
->mps
+ HCLGE_DEFAULT_DV
;
1458 shared_buf_tc
= pfc_enable_num
* hdev
->mps
+
1459 (tc_num
- pfc_enable_num
) * hdev
->mps
/ 2 +
1461 shared_std
= max_t(u32
, shared_buf_min
, shared_buf_tc
);
1463 rx_priv
= hclge_get_rx_priv_buff_alloced(hdev
);
1464 if (rx_all
<= rx_priv
+ shared_std
)
1467 shared_buf
= rx_all
- rx_priv
;
1468 hdev
->s_buf
.buf_size
= shared_buf
;
1469 hdev
->s_buf
.self
.high
= shared_buf
;
1470 hdev
->s_buf
.self
.low
= 2 * hdev
->mps
;
1472 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1473 if ((hdev
->hw_tc_map
& BIT(i
)) &&
1474 (hdev
->tm_info
.hw_pfc_map
& BIT(i
))) {
1475 hdev
->s_buf
.tc_thrd
[i
].low
= hdev
->mps
;
1476 hdev
->s_buf
.tc_thrd
[i
].high
= 2 * hdev
->mps
;
1478 hdev
->s_buf
.tc_thrd
[i
].low
= 0;
1479 hdev
->s_buf
.tc_thrd
[i
].high
= hdev
->mps
;
1486 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1487 * @hdev: pointer to struct hclge_dev
1488 * @tx_size: the allocated tx buffer for all TCs
1489 * @return: 0: calculate sucessful, negative: fail
1491 int hclge_rx_buffer_calc(struct hclge_dev
*hdev
, u32 tx_size
)
1493 u32 rx_all
= hdev
->pkt_buf_size
- tx_size
;
1494 int no_pfc_priv_num
, pfc_priv_num
;
1495 struct hclge_priv_buf
*priv
;
1498 /* step 1, try to alloc private buffer for all enabled tc */
1499 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1500 priv
= &hdev
->priv_buf
[i
];
1501 if (hdev
->hw_tc_map
& BIT(i
)) {
1503 if (hdev
->tm_info
.hw_pfc_map
& BIT(i
)) {
1504 priv
->wl
.low
= hdev
->mps
;
1505 priv
->wl
.high
= priv
->wl
.low
+ hdev
->mps
;
1506 priv
->buf_size
= priv
->wl
.high
+
1510 priv
->wl
.high
= 2 * hdev
->mps
;
1511 priv
->buf_size
= priv
->wl
.high
;
1516 if (hclge_is_rx_buf_ok(hdev
, rx_all
))
1519 /* step 2, try to decrease the buffer size of
1520 * no pfc TC's private buffer
1522 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1523 priv
= &hdev
->priv_buf
[i
];
1525 if (hdev
->hw_tc_map
& BIT(i
))
1528 if (hdev
->tm_info
.hw_pfc_map
& BIT(i
)) {
1530 priv
->wl
.high
= priv
->wl
.low
+ hdev
->mps
;
1531 priv
->buf_size
= priv
->wl
.high
+ HCLGE_DEFAULT_DV
;
1534 priv
->wl
.high
= hdev
->mps
;
1535 priv
->buf_size
= priv
->wl
.high
;
1539 if (hclge_is_rx_buf_ok(hdev
, rx_all
))
1542 /* step 3, try to reduce the number of pfc disabled TCs,
1543 * which have private buffer
1545 /* get the total no pfc enable TC number, which have private buffer */
1546 no_pfc_priv_num
= hclge_get_no_pfc_priv_num(hdev
);
1548 /* let the last to be cleared first */
1549 for (i
= HCLGE_MAX_TC_NUM
- 1; i
>= 0; i
--) {
1550 priv
= &hdev
->priv_buf
[i
];
1552 if (hdev
->hw_tc_map
& BIT(i
) &&
1553 !(hdev
->tm_info
.hw_pfc_map
& BIT(i
))) {
1554 /* Clear the no pfc TC private buffer */
1562 if (hclge_is_rx_buf_ok(hdev
, rx_all
) ||
1563 no_pfc_priv_num
== 0)
1567 if (hclge_is_rx_buf_ok(hdev
, rx_all
))
1570 /* step 4, try to reduce the number of pfc enabled TCs
1571 * which have private buffer.
1573 pfc_priv_num
= hclge_get_pfc_priv_num(hdev
);
1575 /* let the last to be cleared first */
1576 for (i
= HCLGE_MAX_TC_NUM
- 1; i
>= 0; i
--) {
1577 priv
= &hdev
->priv_buf
[i
];
1579 if (hdev
->hw_tc_map
& BIT(i
) &&
1580 hdev
->tm_info
.hw_pfc_map
& BIT(i
)) {
1581 /* Reduce the number of pfc TC with private buffer */
1589 if (hclge_is_rx_buf_ok(hdev
, rx_all
) ||
1593 if (hclge_is_rx_buf_ok(hdev
, rx_all
))
1599 static int hclge_rx_priv_buf_alloc(struct hclge_dev
*hdev
)
1601 struct hclge_rx_priv_buff
*req
;
1602 struct hclge_desc desc
;
1606 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RX_PRIV_BUFF_ALLOC
, false);
1607 req
= (struct hclge_rx_priv_buff
*)desc
.data
;
1609 /* Alloc private buffer TCs */
1610 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
1611 struct hclge_priv_buf
*priv
= &hdev
->priv_buf
[i
];
1614 cpu_to_le16(priv
->buf_size
>> HCLGE_BUF_UNIT_S
);
1616 cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B
);
1619 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1621 dev_err(&hdev
->pdev
->dev
,
1622 "rx private buffer alloc cmd failed %d\n", ret
);
1629 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1631 static int hclge_rx_priv_wl_config(struct hclge_dev
*hdev
)
1633 struct hclge_rx_priv_wl_buf
*req
;
1634 struct hclge_priv_buf
*priv
;
1635 struct hclge_desc desc
[2];
1639 for (i
= 0; i
< 2; i
++) {
1640 hclge_cmd_setup_basic_desc(&desc
[i
], HCLGE_OPC_RX_PRIV_WL_ALLOC
,
1642 req
= (struct hclge_rx_priv_wl_buf
*)desc
[i
].data
;
1644 /* The first descriptor set the NEXT bit to 1 */
1646 desc
[i
].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1648 desc
[i
].flag
&= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1650 for (j
= 0; j
< HCLGE_TC_NUM_ONE_DESC
; j
++) {
1651 priv
= &hdev
->priv_buf
[i
* HCLGE_TC_NUM_ONE_DESC
+ j
];
1652 req
->tc_wl
[j
].high
=
1653 cpu_to_le16(priv
->wl
.high
>> HCLGE_BUF_UNIT_S
);
1654 req
->tc_wl
[j
].high
|=
1655 cpu_to_le16(HCLGE_PRIV_ENABLE(priv
->wl
.high
) <<
1656 HCLGE_RX_PRIV_EN_B
);
1658 cpu_to_le16(priv
->wl
.low
>> HCLGE_BUF_UNIT_S
);
1659 req
->tc_wl
[j
].low
|=
1660 cpu_to_le16(HCLGE_PRIV_ENABLE(priv
->wl
.low
) <<
1661 HCLGE_RX_PRIV_EN_B
);
1665 /* Send 2 descriptor at one time */
1666 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
1668 dev_err(&hdev
->pdev
->dev
,
1669 "rx private waterline config cmd failed %d\n",
1676 static int hclge_common_thrd_config(struct hclge_dev
*hdev
)
1678 struct hclge_shared_buf
*s_buf
= &hdev
->s_buf
;
1679 struct hclge_rx_com_thrd
*req
;
1680 struct hclge_desc desc
[2];
1681 struct hclge_tc_thrd
*tc
;
1685 for (i
= 0; i
< 2; i
++) {
1686 hclge_cmd_setup_basic_desc(&desc
[i
],
1687 HCLGE_OPC_RX_COM_THRD_ALLOC
, false);
1688 req
= (struct hclge_rx_com_thrd
*)&desc
[i
].data
;
1690 /* The first descriptor set the NEXT bit to 1 */
1692 desc
[i
].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1694 desc
[i
].flag
&= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1696 for (j
= 0; j
< HCLGE_TC_NUM_ONE_DESC
; j
++) {
1697 tc
= &s_buf
->tc_thrd
[i
* HCLGE_TC_NUM_ONE_DESC
+ j
];
1699 req
->com_thrd
[j
].high
=
1700 cpu_to_le16(tc
->high
>> HCLGE_BUF_UNIT_S
);
1701 req
->com_thrd
[j
].high
|=
1702 cpu_to_le16(HCLGE_PRIV_ENABLE(tc
->high
) <<
1703 HCLGE_RX_PRIV_EN_B
);
1704 req
->com_thrd
[j
].low
=
1705 cpu_to_le16(tc
->low
>> HCLGE_BUF_UNIT_S
);
1706 req
->com_thrd
[j
].low
|=
1707 cpu_to_le16(HCLGE_PRIV_ENABLE(tc
->low
) <<
1708 HCLGE_RX_PRIV_EN_B
);
1712 /* Send 2 descriptors at one time */
1713 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
1715 dev_err(&hdev
->pdev
->dev
,
1716 "common threshold config cmd failed %d\n", ret
);
1722 static int hclge_common_wl_config(struct hclge_dev
*hdev
)
1724 struct hclge_shared_buf
*buf
= &hdev
->s_buf
;
1725 struct hclge_rx_com_wl
*req
;
1726 struct hclge_desc desc
;
1729 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RX_COM_WL_ALLOC
, false);
1731 req
= (struct hclge_rx_com_wl
*)desc
.data
;
1732 req
->com_wl
.high
= cpu_to_le16(buf
->self
.high
>> HCLGE_BUF_UNIT_S
);
1734 cpu_to_le16(HCLGE_PRIV_ENABLE(buf
->self
.high
) <<
1735 HCLGE_RX_PRIV_EN_B
);
1737 req
->com_wl
.low
= cpu_to_le16(buf
->self
.low
>> HCLGE_BUF_UNIT_S
);
1739 cpu_to_le16(HCLGE_PRIV_ENABLE(buf
->self
.low
) <<
1740 HCLGE_RX_PRIV_EN_B
);
1742 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1744 dev_err(&hdev
->pdev
->dev
,
1745 "common waterline config cmd failed %d\n", ret
);
1752 int hclge_buffer_alloc(struct hclge_dev
*hdev
)
1754 u32 tx_buf_size
= HCLGE_DEFAULT_TX_BUF
;
1757 hdev
->priv_buf
= devm_kmalloc_array(&hdev
->pdev
->dev
, HCLGE_MAX_TC_NUM
,
1758 sizeof(struct hclge_priv_buf
),
1759 GFP_KERNEL
| __GFP_ZERO
);
1760 if (!hdev
->priv_buf
)
1763 ret
= hclge_tx_buffer_alloc(hdev
, tx_buf_size
);
1765 dev_err(&hdev
->pdev
->dev
,
1766 "could not alloc tx buffers %d\n", ret
);
1770 ret
= hclge_rx_buffer_calc(hdev
, tx_buf_size
);
1772 dev_err(&hdev
->pdev
->dev
,
1773 "could not calc rx priv buffer size for all TCs %d\n",
1778 ret
= hclge_rx_priv_buf_alloc(hdev
);
1780 dev_err(&hdev
->pdev
->dev
, "could not alloc rx priv buffer %d\n",
1785 ret
= hclge_rx_priv_wl_config(hdev
);
1787 dev_err(&hdev
->pdev
->dev
,
1788 "could not configure rx private waterline %d\n", ret
);
1792 ret
= hclge_common_thrd_config(hdev
);
1794 dev_err(&hdev
->pdev
->dev
,
1795 "could not configure common threshold %d\n", ret
);
1799 ret
= hclge_common_wl_config(hdev
);
1801 dev_err(&hdev
->pdev
->dev
,
1802 "could not configure common waterline %d\n", ret
);
1809 static int hclge_init_roce_base_info(struct hclge_vport
*vport
)
1811 struct hnae3_handle
*roce
= &vport
->roce
;
1812 struct hnae3_handle
*nic
= &vport
->nic
;
1814 roce
->rinfo
.num_vectors
= vport
->back
->num_roce_msix
;
1816 if (vport
->back
->num_msi_left
< vport
->roce
.rinfo
.num_vectors
||
1817 vport
->back
->num_msi_left
== 0)
1820 roce
->rinfo
.base_vector
= vport
->back
->roce_base_vector
;
1822 roce
->rinfo
.netdev
= nic
->kinfo
.netdev
;
1823 roce
->rinfo
.roce_io_base
= vport
->back
->hw
.io_base
;
1825 roce
->pdev
= nic
->pdev
;
1826 roce
->ae_algo
= nic
->ae_algo
;
1827 roce
->numa_node_mask
= nic
->numa_node_mask
;
1832 static int hclge_init_msix(struct hclge_dev
*hdev
)
1834 struct pci_dev
*pdev
= hdev
->pdev
;
1837 hdev
->msix_entries
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
1838 sizeof(struct msix_entry
),
1840 if (!hdev
->msix_entries
)
1843 hdev
->vector_status
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
1844 sizeof(u16
), GFP_KERNEL
);
1845 if (!hdev
->vector_status
)
1848 for (i
= 0; i
< hdev
->num_msi
; i
++) {
1849 hdev
->msix_entries
[i
].entry
= i
;
1850 hdev
->vector_status
[i
] = HCLGE_INVALID_VPORT
;
1853 hdev
->num_msi_left
= hdev
->num_msi
;
1854 hdev
->base_msi_vector
= hdev
->pdev
->irq
;
1855 hdev
->roce_base_vector
= hdev
->base_msi_vector
+
1856 HCLGE_ROCE_VECTOR_OFFSET
;
1858 ret
= pci_enable_msix_range(hdev
->pdev
, hdev
->msix_entries
,
1859 hdev
->num_msi
, hdev
->num_msi
);
1861 dev_info(&hdev
->pdev
->dev
,
1862 "MSI-X vector alloc failed: %d\n", ret
);
1869 static int hclge_init_msi(struct hclge_dev
*hdev
)
1871 struct pci_dev
*pdev
= hdev
->pdev
;
1875 hdev
->vector_status
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
1876 sizeof(u16
), GFP_KERNEL
);
1877 if (!hdev
->vector_status
)
1880 for (i
= 0; i
< hdev
->num_msi
; i
++)
1881 hdev
->vector_status
[i
] = HCLGE_INVALID_VPORT
;
1883 vectors
= pci_alloc_irq_vectors(pdev
, 1, hdev
->num_msi
, PCI_IRQ_MSI
);
1885 dev_err(&pdev
->dev
, "MSI vectors enable failed %d\n", vectors
);
1888 hdev
->num_msi
= vectors
;
1889 hdev
->num_msi_left
= vectors
;
1890 hdev
->base_msi_vector
= pdev
->irq
;
1891 hdev
->roce_base_vector
= hdev
->base_msi_vector
+
1892 HCLGE_ROCE_VECTOR_OFFSET
;
1897 static void hclge_check_speed_dup(struct hclge_dev
*hdev
, int duplex
, int speed
)
1899 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
1901 if ((speed
== HCLGE_MAC_SPEED_10M
) || (speed
== HCLGE_MAC_SPEED_100M
))
1902 mac
->duplex
= (u8
)duplex
;
1904 mac
->duplex
= HCLGE_MAC_FULL
;
1909 int hclge_cfg_mac_speed_dup(struct hclge_dev
*hdev
, int speed
, u8 duplex
)
1911 struct hclge_config_mac_speed_dup
*req
;
1912 struct hclge_desc desc
;
1915 req
= (struct hclge_config_mac_speed_dup
*)desc
.data
;
1917 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_SPEED_DUP
, false);
1919 hnae_set_bit(req
->speed_dup
, HCLGE_CFG_DUPLEX_B
, !!duplex
);
1922 case HCLGE_MAC_SPEED_10M
:
1923 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1924 HCLGE_CFG_SPEED_S
, 6);
1926 case HCLGE_MAC_SPEED_100M
:
1927 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1928 HCLGE_CFG_SPEED_S
, 7);
1930 case HCLGE_MAC_SPEED_1G
:
1931 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1932 HCLGE_CFG_SPEED_S
, 0);
1934 case HCLGE_MAC_SPEED_10G
:
1935 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1936 HCLGE_CFG_SPEED_S
, 1);
1938 case HCLGE_MAC_SPEED_25G
:
1939 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1940 HCLGE_CFG_SPEED_S
, 2);
1942 case HCLGE_MAC_SPEED_40G
:
1943 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1944 HCLGE_CFG_SPEED_S
, 3);
1946 case HCLGE_MAC_SPEED_50G
:
1947 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1948 HCLGE_CFG_SPEED_S
, 4);
1950 case HCLGE_MAC_SPEED_100G
:
1951 hnae_set_field(req
->speed_dup
, HCLGE_CFG_SPEED_M
,
1952 HCLGE_CFG_SPEED_S
, 5);
1955 dev_err(&hdev
->pdev
->dev
, "invalid speed (%d)\n", speed
);
1959 hnae_set_bit(req
->mac_change_fec_en
, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B
,
1962 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1964 dev_err(&hdev
->pdev
->dev
,
1965 "mac speed/duplex config cmd failed %d.\n", ret
);
1969 hclge_check_speed_dup(hdev
, duplex
, speed
);
1974 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle
*handle
, int speed
,
1977 struct hclge_vport
*vport
= hclge_get_vport(handle
);
1978 struct hclge_dev
*hdev
= vport
->back
;
1980 return hclge_cfg_mac_speed_dup(hdev
, speed
, duplex
);
1983 static int hclge_query_mac_an_speed_dup(struct hclge_dev
*hdev
, int *speed
,
1986 struct hclge_query_an_speed_dup
*req
;
1987 struct hclge_desc desc
;
1991 req
= (struct hclge_query_an_speed_dup
*)desc
.data
;
1993 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_AN_RESULT
, true);
1994 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
1996 dev_err(&hdev
->pdev
->dev
,
1997 "mac speed/autoneg/duplex query cmd failed %d\n",
2002 *duplex
= hnae_get_bit(req
->an_syn_dup_speed
, HCLGE_QUERY_DUPLEX_B
);
2003 speed_tmp
= hnae_get_field(req
->an_syn_dup_speed
, HCLGE_QUERY_SPEED_M
,
2004 HCLGE_QUERY_SPEED_S
);
2006 ret
= hclge_parse_speed(speed_tmp
, speed
);
2008 dev_err(&hdev
->pdev
->dev
,
2009 "could not parse speed(=%d), %d\n", speed_tmp
, ret
);
2016 static int hclge_query_autoneg_result(struct hclge_dev
*hdev
)
2018 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
2019 struct hclge_query_an_speed_dup
*req
;
2020 struct hclge_desc desc
;
2023 req
= (struct hclge_query_an_speed_dup
*)desc
.data
;
2025 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_AN_RESULT
, true);
2026 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2028 dev_err(&hdev
->pdev
->dev
,
2029 "autoneg result query cmd failed %d.\n", ret
);
2033 mac
->autoneg
= hnae_get_bit(req
->an_syn_dup_speed
, HCLGE_QUERY_AN_B
);
2038 static int hclge_set_autoneg_en(struct hclge_dev
*hdev
, bool enable
)
2040 struct hclge_config_auto_neg
*req
;
2041 struct hclge_desc desc
;
2044 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_AN_MODE
, false);
2046 req
= (struct hclge_config_auto_neg
*)desc
.data
;
2047 hnae_set_bit(req
->cfg_an_cmd_flag
, HCLGE_MAC_CFG_AN_EN_B
, !!enable
);
2049 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2051 dev_err(&hdev
->pdev
->dev
, "auto neg set cmd failed %d.\n",
2059 static int hclge_set_autoneg(struct hnae3_handle
*handle
, bool enable
)
2061 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2062 struct hclge_dev
*hdev
= vport
->back
;
2064 return hclge_set_autoneg_en(hdev
, enable
);
2067 static int hclge_get_autoneg(struct hnae3_handle
*handle
)
2069 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2070 struct hclge_dev
*hdev
= vport
->back
;
2072 hclge_query_autoneg_result(hdev
);
2074 return hdev
->hw
.mac
.autoneg
;
2077 static int hclge_mac_init(struct hclge_dev
*hdev
)
2079 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
2082 ret
= hclge_cfg_mac_speed_dup(hdev
, hdev
->hw
.mac
.speed
, HCLGE_MAC_FULL
);
2084 dev_err(&hdev
->pdev
->dev
,
2085 "Config mac speed dup fail ret=%d\n", ret
);
2091 ret
= hclge_mac_mdio_config(hdev
);
2093 dev_warn(&hdev
->pdev
->dev
,
2094 "mdio config fail ret=%d\n", ret
);
2098 /* Initialize the MTA table work mode */
2099 hdev
->accept_mta_mc
= true;
2100 hdev
->enable_mta
= true;
2101 hdev
->mta_mac_sel_type
= HCLGE_MAC_ADDR_47_36
;
2103 ret
= hclge_set_mta_filter_mode(hdev
,
2104 hdev
->mta_mac_sel_type
,
2107 dev_err(&hdev
->pdev
->dev
, "set mta filter mode failed %d\n",
2112 return hclge_cfg_func_mta_filter(hdev
, 0, hdev
->accept_mta_mc
);
2115 static void hclge_task_schedule(struct hclge_dev
*hdev
)
2117 if (!test_bit(HCLGE_STATE_DOWN
, &hdev
->state
) &&
2118 !test_bit(HCLGE_STATE_REMOVING
, &hdev
->state
) &&
2119 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED
, &hdev
->state
))
2120 (void)schedule_work(&hdev
->service_task
);
2123 static int hclge_get_mac_link_status(struct hclge_dev
*hdev
)
2125 struct hclge_link_status
*req
;
2126 struct hclge_desc desc
;
2130 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_LINK_STATUS
, true);
2131 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2133 dev_err(&hdev
->pdev
->dev
, "get link status cmd failed %d\n",
2138 req
= (struct hclge_link_status
*)desc
.data
;
2139 link_status
= req
->status
& HCLGE_LINK_STATUS
;
2141 return !!link_status
;
2144 static int hclge_get_mac_phy_link(struct hclge_dev
*hdev
)
2149 mac_state
= hclge_get_mac_link_status(hdev
);
2151 if (hdev
->hw
.mac
.phydev
) {
2152 if (!genphy_read_status(hdev
->hw
.mac
.phydev
))
2153 link_stat
= mac_state
&
2154 hdev
->hw
.mac
.phydev
->link
;
2159 link_stat
= mac_state
;
2165 static void hclge_update_link_status(struct hclge_dev
*hdev
)
2167 struct hnae3_client
*client
= hdev
->nic_client
;
2168 struct hnae3_handle
*handle
;
2174 state
= hclge_get_mac_phy_link(hdev
);
2175 if (state
!= hdev
->hw
.mac
.link
) {
2176 for (i
= 0; i
< hdev
->num_vmdq_vport
+ 1; i
++) {
2177 handle
= &hdev
->vport
[i
].nic
;
2178 client
->ops
->link_status_change(handle
, state
);
2180 hdev
->hw
.mac
.link
= state
;
2184 static int hclge_update_speed_duplex(struct hclge_dev
*hdev
)
2186 struct hclge_mac mac
= hdev
->hw
.mac
;
2191 /* get the speed and duplex as autoneg'result from mac cmd when phy
2197 /* update mac->antoneg. */
2198 ret
= hclge_query_autoneg_result(hdev
);
2200 dev_err(&hdev
->pdev
->dev
,
2201 "autoneg result query failed %d\n", ret
);
2208 ret
= hclge_query_mac_an_speed_dup(hdev
, &speed
, &duplex
);
2210 dev_err(&hdev
->pdev
->dev
,
2211 "mac autoneg/speed/duplex query failed %d\n", ret
);
2215 if ((mac
.speed
!= speed
) || (mac
.duplex
!= duplex
)) {
2216 ret
= hclge_cfg_mac_speed_dup(hdev
, speed
, duplex
);
2218 dev_err(&hdev
->pdev
->dev
,
2219 "mac speed/duplex config failed %d\n", ret
);
2227 static int hclge_update_speed_duplex_h(struct hnae3_handle
*handle
)
2229 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2230 struct hclge_dev
*hdev
= vport
->back
;
2232 return hclge_update_speed_duplex(hdev
);
2235 static int hclge_get_status(struct hnae3_handle
*handle
)
2237 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2238 struct hclge_dev
*hdev
= vport
->back
;
2240 hclge_update_link_status(hdev
);
2242 return hdev
->hw
.mac
.link
;
/* Legacy (pre-timer_setup) timer callback: @data smuggles the
 * struct hclge_dev pointer.  Re-arms itself one HZ (~1 s) ahead and
 * then kicks the service work item.
 */
2245 static void hclge_service_timer(unsigned long data
)
2247 struct hclge_dev
*hdev
= (struct hclge_dev
*)data
;
/* Re-arm first so the next tick is not delayed by the scheduling below */
2248 (void)mod_timer(&hdev
->service_timer
, jiffies
+ HZ
);
2250 hclge_task_schedule(hdev
);
/* Marks the current service pass finished: warns if the SERVICE_SCHED
 * bit was not set (the task ran without being scheduled), then clears
 * it so hclge_task_schedule() may queue the next pass.
 */
2253 static void hclge_service_complete(struct hclge_dev
*hdev
)
2255 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED
, &hdev
->state
));
2257 /* Flush memory before next watchdog */
2258 smp_mb__before_atomic();
2259 clear_bit(HCLGE_STATE_SERVICE_SCHED
, &hdev
->state
);
/* Periodic service work item: refreshes speed/duplex, link state and
 * statistics, then calls hclge_service_complete() to clear the
 * SERVICE_SCHED bit so the timer can schedule the next pass.
 */
2262 static void hclge_service_task(struct work_struct
*work
)
2264 struct hclge_dev
*hdev
=
2265 container_of(work
, struct hclge_dev
, service_task
);
2267 hclge_update_speed_duplex(hdev
);
2268 hclge_update_link_status(hdev
);
2269 hclge_update_stats_for_all(hdev
);
2270 hclge_service_complete(hdev
);
2273 static void hclge_disable_sriov(struct hclge_dev
*hdev
)
2275 /* If our VFs are assigned we cannot shut down SR-IOV
2276 * without causing issues, so just leave the hardware
2277 * available but disabled
2279 if (pci_vfs_assigned(hdev
->pdev
)) {
2280 dev_warn(&hdev
->pdev
->dev
,
2281 "disabling driver while VFs are assigned\n");
2285 pci_disable_sriov(hdev
->pdev
);
2288 struct hclge_vport
*hclge_get_vport(struct hnae3_handle
*handle
)
2290 /* VF handle has no client */
2291 if (!handle
->client
)
2292 return container_of(handle
, struct hclge_vport
, nic
);
2293 else if (handle
->client
->type
== HNAE3_CLIENT_ROCE
)
2294 return container_of(handle
, struct hclge_vport
, roce
);
2296 return container_of(handle
, struct hclge_vport
, nic
);
2299 static int hclge_get_vector(struct hnae3_handle
*handle
, u16 vector_num
,
2300 struct hnae3_vector_info
*vector_info
)
2302 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2303 struct hnae3_vector_info
*vector
= vector_info
;
2304 struct hclge_dev
*hdev
= vport
->back
;
2308 vector_num
= min(hdev
->num_msi_left
, vector_num
);
2310 for (j
= 0; j
< vector_num
; j
++) {
2311 for (i
= 1; i
< hdev
->num_msi
; i
++) {
2312 if (hdev
->vector_status
[i
] == HCLGE_INVALID_VPORT
) {
2313 vector
->vector
= pci_irq_vector(hdev
->pdev
, i
);
2314 vector
->io_addr
= hdev
->hw
.io_base
+
2315 HCLGE_VECTOR_REG_BASE
+
2316 (i
- 1) * HCLGE_VECTOR_REG_OFFSET
+
2318 HCLGE_VECTOR_VF_OFFSET
;
2319 hdev
->vector_status
[i
] = vport
->vport_id
;
2328 hdev
->num_msi_left
-= alloc
;
2329 hdev
->num_msi_used
+= alloc
;
2334 static int hclge_get_vector_index(struct hclge_dev
*hdev
, int vector
)
2338 for (i
= 0; i
< hdev
->num_msi
; i
++) {
2339 if (hdev
->msix_entries
) {
2340 if (vector
== hdev
->msix_entries
[i
].vector
)
2343 if (vector
== (hdev
->base_msi_vector
+ i
))
/* ethtool hook: RSS hash key length in bytes (hardware-fixed constant). */
2350 static u32
hclge_get_rss_key_size(struct hnae3_handle
*handle
)
2352 return HCLGE_RSS_KEY_SIZE
;
/* ethtool hook: number of entries in the RSS indirection table
 * (hardware-fixed constant).
 */
2355 static u32
hclge_get_rss_indir_size(struct hnae3_handle
*handle
)
2357 return HCLGE_RSS_IND_TBL_SIZE
;
2360 static int hclge_get_rss_algo(struct hclge_dev
*hdev
)
2362 struct hclge_rss_config
*req
;
2363 struct hclge_desc desc
;
2367 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RSS_GENERIC_CONFIG
, true);
2369 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2371 dev_err(&hdev
->pdev
->dev
,
2372 "Get link status error, status =%d\n", ret
);
2376 req
= (struct hclge_rss_config
*)desc
.data
;
2377 rss_hash_algo
= (req
->hash_config
& HCLGE_RSS_HASH_ALGO_MASK
);
2379 if (rss_hash_algo
== HCLGE_RSS_HASH_ALGO_TOEPLITZ
)
2380 return ETH_RSS_HASH_TOP
;
2385 static int hclge_set_rss_algo_key(struct hclge_dev
*hdev
,
2386 const u8 hfunc
, const u8
*key
)
2388 struct hclge_rss_config
*req
;
2389 struct hclge_desc desc
;
2394 req
= (struct hclge_rss_config
*)desc
.data
;
2396 for (key_offset
= 0; key_offset
< 3; key_offset
++) {
2397 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RSS_GENERIC_CONFIG
,
2400 req
->hash_config
|= (hfunc
& HCLGE_RSS_HASH_ALGO_MASK
);
2401 req
->hash_config
|= (key_offset
<< HCLGE_RSS_HASH_KEY_OFFSET_B
);
2403 if (key_offset
== 2)
2405 HCLGE_RSS_KEY_SIZE
- HCLGE_RSS_HASH_KEY_NUM
* 2;
2407 key_size
= HCLGE_RSS_HASH_KEY_NUM
;
2409 memcpy(req
->hash_key
,
2410 key
+ key_offset
* HCLGE_RSS_HASH_KEY_NUM
, key_size
);
2412 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2414 dev_err(&hdev
->pdev
->dev
,
2415 "Configure RSS config fail, status = %d\n",
2423 static int hclge_set_rss_indir_table(struct hclge_dev
*hdev
, const u32
*indir
)
2425 struct hclge_rss_indirection_table
*req
;
2426 struct hclge_desc desc
;
2430 req
= (struct hclge_rss_indirection_table
*)desc
.data
;
2432 for (i
= 0; i
< HCLGE_RSS_CFG_TBL_NUM
; i
++) {
2433 hclge_cmd_setup_basic_desc
2434 (&desc
, HCLGE_OPC_RSS_INDIR_TABLE
, false);
2436 req
->start_table_index
= i
* HCLGE_RSS_CFG_TBL_SIZE
;
2437 req
->rss_set_bitmap
= HCLGE_RSS_SET_BITMAP_MSK
;
2439 for (j
= 0; j
< HCLGE_RSS_CFG_TBL_SIZE
; j
++)
2440 req
->rss_result
[j
] =
2441 indir
[i
* HCLGE_RSS_CFG_TBL_SIZE
+ j
];
2443 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2445 dev_err(&hdev
->pdev
->dev
,
2446 "Configure rss indir table fail,status = %d\n",
2454 static int hclge_set_rss_tc_mode(struct hclge_dev
*hdev
, u16
*tc_valid
,
2455 u16
*tc_size
, u16
*tc_offset
)
2457 struct hclge_rss_tc_mode
*req
;
2458 struct hclge_desc desc
;
2462 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RSS_TC_MODE
, false);
2463 req
= (struct hclge_rss_tc_mode
*)desc
.data
;
2465 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
2466 hnae_set_bit(req
->rss_tc_mode
[i
], HCLGE_RSS_TC_VALID_B
,
2467 (tc_valid
[i
] & 0x1));
2468 hnae_set_field(req
->rss_tc_mode
[i
], HCLGE_RSS_TC_SIZE_M
,
2469 HCLGE_RSS_TC_SIZE_S
, tc_size
[i
]);
2470 hnae_set_field(req
->rss_tc_mode
[i
], HCLGE_RSS_TC_OFFSET_M
,
2471 HCLGE_RSS_TC_OFFSET_S
, tc_offset
[i
]);
2474 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2476 dev_err(&hdev
->pdev
->dev
,
2477 "Configure rss tc mode fail, status = %d\n", ret
);
2484 static int hclge_set_rss_input_tuple(struct hclge_dev
*hdev
)
2486 #define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf
2487 #define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f
2488 struct hclge_rss_input_tuple
*req
;
2489 struct hclge_desc desc
;
2492 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RSS_INPUT_TUPLE
, false);
2494 req
= (struct hclge_rss_input_tuple
*)desc
.data
;
2495 req
->ipv4_tcp_en
= HCLGE_RSS_INPUT_TUPLE_OTHER
;
2496 req
->ipv4_udp_en
= HCLGE_RSS_INPUT_TUPLE_OTHER
;
2497 req
->ipv4_sctp_en
= HCLGE_RSS_INPUT_TUPLE_SCTP
;
2498 req
->ipv4_fragment_en
= HCLGE_RSS_INPUT_TUPLE_OTHER
;
2499 req
->ipv6_tcp_en
= HCLGE_RSS_INPUT_TUPLE_OTHER
;
2500 req
->ipv6_udp_en
= HCLGE_RSS_INPUT_TUPLE_OTHER
;
2501 req
->ipv6_sctp_en
= HCLGE_RSS_INPUT_TUPLE_SCTP
;
2502 req
->ipv6_fragment_en
= HCLGE_RSS_INPUT_TUPLE_OTHER
;
2503 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2505 dev_err(&hdev
->pdev
->dev
,
2506 "Configure rss input fail, status = %d\n", ret
);
2513 static int hclge_get_rss(struct hnae3_handle
*handle
, u32
*indir
,
2516 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2517 struct hclge_dev
*hdev
= vport
->back
;
2520 /* Get hash algorithm */
2522 *hfunc
= hclge_get_rss_algo(hdev
);
2524 /* Get the RSS Key required by the user */
2526 memcpy(key
, vport
->rss_hash_key
, HCLGE_RSS_KEY_SIZE
);
2528 /* Get indirect table */
2530 for (i
= 0; i
< HCLGE_RSS_IND_TBL_SIZE
; i
++)
2531 indir
[i
] = vport
->rss_indirection_tbl
[i
];
2536 static int hclge_set_rss(struct hnae3_handle
*handle
, const u32
*indir
,
2537 const u8
*key
, const u8 hfunc
)
2539 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2540 struct hclge_dev
*hdev
= vport
->back
;
2544 /* Set the RSS Hash Key if specififed by the user */
2546 /* Update the shadow RSS key with user specified qids */
2547 memcpy(vport
->rss_hash_key
, key
, HCLGE_RSS_KEY_SIZE
);
2549 if (hfunc
== ETH_RSS_HASH_TOP
||
2550 hfunc
== ETH_RSS_HASH_NO_CHANGE
)
2551 hash_algo
= HCLGE_RSS_HASH_ALGO_TOEPLITZ
;
2554 ret
= hclge_set_rss_algo_key(hdev
, hash_algo
, key
);
2559 /* Update the shadow RSS table with user specified qids */
2560 for (i
= 0; i
< HCLGE_RSS_IND_TBL_SIZE
; i
++)
2561 vport
->rss_indirection_tbl
[i
] = indir
[i
];
2563 /* Update the hardware */
2564 ret
= hclge_set_rss_indir_table(hdev
, indir
);
2568 static int hclge_get_tc_size(struct hnae3_handle
*handle
)
2570 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2571 struct hclge_dev
*hdev
= vport
->back
;
2573 return hdev
->rss_size_max
;
2576 static int hclge_rss_init_hw(struct hclge_dev
*hdev
)
2578 const u8 hfunc
= HCLGE_RSS_HASH_ALGO_TOEPLITZ
;
2579 struct hclge_vport
*vport
= hdev
->vport
;
2580 u16 tc_offset
[HCLGE_MAX_TC_NUM
];
2581 u8 rss_key
[HCLGE_RSS_KEY_SIZE
];
2582 u16 tc_valid
[HCLGE_MAX_TC_NUM
];
2583 u16 tc_size
[HCLGE_MAX_TC_NUM
];
2584 u32
*rss_indir
= NULL
;
2588 rss_indir
= kcalloc(HCLGE_RSS_IND_TBL_SIZE
, sizeof(u32
), GFP_KERNEL
);
2592 /* Get default RSS key */
2593 netdev_rss_key_fill(rss_key
, HCLGE_RSS_KEY_SIZE
);
2595 /* Initialize RSS indirect table for each vport */
2596 for (j
= 0; j
< hdev
->num_vmdq_vport
+ 1; j
++) {
2597 for (i
= 0; i
< HCLGE_RSS_IND_TBL_SIZE
; i
++) {
2598 vport
[j
].rss_indirection_tbl
[i
] =
2599 i
% hdev
->rss_size_max
;
2600 rss_indir
[i
] = vport
[j
].rss_indirection_tbl
[i
];
2603 ret
= hclge_set_rss_indir_table(hdev
, rss_indir
);
2608 ret
= hclge_set_rss_algo_key(hdev
, hfunc
, key
);
2612 ret
= hclge_set_rss_input_tuple(hdev
);
2616 for (i
= 0; i
< HCLGE_MAX_TC_NUM
; i
++) {
2617 if (hdev
->hw_tc_map
& BIT(i
))
2622 switch (hdev
->rss_size_max
) {
2623 case HCLGE_RSS_TC_SIZE_0
:
2626 case HCLGE_RSS_TC_SIZE_1
:
2629 case HCLGE_RSS_TC_SIZE_2
:
2632 case HCLGE_RSS_TC_SIZE_3
:
2635 case HCLGE_RSS_TC_SIZE_4
:
2638 case HCLGE_RSS_TC_SIZE_5
:
2641 case HCLGE_RSS_TC_SIZE_6
:
2644 case HCLGE_RSS_TC_SIZE_7
:
2650 tc_offset
[i
] = hdev
->rss_size_max
* i
;
2652 ret
= hclge_set_rss_tc_mode(hdev
, tc_valid
, tc_size
, tc_offset
);
2660 int hclge_map_vport_ring_to_vector(struct hclge_vport
*vport
, int vector_id
,
2661 struct hnae3_ring_chain_node
*ring_chain
)
2663 struct hclge_dev
*hdev
= vport
->back
;
2664 struct hclge_ctrl_vector_chain
*req
;
2665 struct hnae3_ring_chain_node
*node
;
2666 struct hclge_desc desc
;
2670 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_ADD_RING_TO_VECTOR
, false);
2672 req
= (struct hclge_ctrl_vector_chain
*)desc
.data
;
2673 req
->int_vector_id
= vector_id
;
2676 for (node
= ring_chain
; node
; node
= node
->next
) {
2677 hnae_set_field(req
->tqp_type_and_id
[i
], HCLGE_INT_TYPE_M
,
2679 hnae_get_bit(node
->flag
, HNAE3_RING_TYPE_B
));
2680 hnae_set_field(req
->tqp_type_and_id
[i
], HCLGE_TQP_ID_M
,
2681 HCLGE_TQP_ID_S
, node
->tqp_index
);
2682 req
->tqp_type_and_id
[i
] = cpu_to_le16(req
->tqp_type_and_id
[i
]);
2684 if (++i
>= HCLGE_VECTOR_ELEMENTS_PER_CMD
) {
2685 req
->int_cause_num
= HCLGE_VECTOR_ELEMENTS_PER_CMD
;
2687 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2689 dev_err(&hdev
->pdev
->dev
,
2690 "Map TQP fail, status is %d.\n",
2696 hclge_cmd_setup_basic_desc(&desc
,
2697 HCLGE_OPC_ADD_RING_TO_VECTOR
,
2699 req
->int_vector_id
= vector_id
;
2704 req
->int_cause_num
= i
;
2706 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2708 dev_err(&hdev
->pdev
->dev
,
2709 "Map TQP fail, status is %d.\n", ret
);
2717 int hclge_map_handle_ring_to_vector(struct hnae3_handle
*handle
,
2719 struct hnae3_ring_chain_node
*ring_chain
)
2721 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2722 struct hclge_dev
*hdev
= vport
->back
;
2725 vector_id
= hclge_get_vector_index(hdev
, vector
);
2726 if (vector_id
< 0) {
2727 dev_err(&hdev
->pdev
->dev
,
2728 "Get vector index fail. ret =%d\n", vector_id
);
2732 return hclge_map_vport_ring_to_vector(vport
, vector_id
, ring_chain
);
2735 static int hclge_unmap_ring_from_vector(
2736 struct hnae3_handle
*handle
, int vector
,
2737 struct hnae3_ring_chain_node
*ring_chain
)
2739 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2740 struct hclge_dev
*hdev
= vport
->back
;
2741 struct hclge_ctrl_vector_chain
*req
;
2742 struct hnae3_ring_chain_node
*node
;
2743 struct hclge_desc desc
;
2747 vector_id
= hclge_get_vector_index(hdev
, vector
);
2748 if (vector_id
< 0) {
2749 dev_err(&handle
->pdev
->dev
,
2750 "Get vector index fail. ret =%d\n", vector_id
);
2754 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_DEL_RING_TO_VECTOR
, false);
2756 req
= (struct hclge_ctrl_vector_chain
*)desc
.data
;
2757 req
->int_vector_id
= vector_id
;
2760 for (node
= ring_chain
; node
; node
= node
->next
) {
2761 hnae_set_field(req
->tqp_type_and_id
[i
], HCLGE_INT_TYPE_M
,
2763 hnae_get_bit(node
->flag
, HNAE3_RING_TYPE_B
));
2764 hnae_set_field(req
->tqp_type_and_id
[i
], HCLGE_TQP_ID_M
,
2765 HCLGE_TQP_ID_S
, node
->tqp_index
);
2767 req
->tqp_type_and_id
[i
] = cpu_to_le16(req
->tqp_type_and_id
[i
]);
2769 if (++i
>= HCLGE_VECTOR_ELEMENTS_PER_CMD
) {
2770 req
->int_cause_num
= HCLGE_VECTOR_ELEMENTS_PER_CMD
;
2772 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2774 dev_err(&hdev
->pdev
->dev
,
2775 "Unmap TQP fail, status is %d.\n",
2780 hclge_cmd_setup_basic_desc(&desc
,
2781 HCLGE_OPC_ADD_RING_TO_VECTOR
,
2783 req
->int_vector_id
= vector_id
;
2788 req
->int_cause_num
= i
;
2790 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2792 dev_err(&hdev
->pdev
->dev
,
2793 "Unmap TQP fail, status is %d.\n", ret
);
2801 int hclge_cmd_set_promisc_mode(struct hclge_dev
*hdev
,
2802 struct hclge_promisc_param
*param
)
2804 struct hclge_promisc_cfg
*req
;
2805 struct hclge_desc desc
;
2808 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CFG_PROMISC_MODE
, false);
2810 req
= (struct hclge_promisc_cfg
*)desc
.data
;
2811 req
->vf_id
= param
->vf_id
;
2812 req
->flag
= (param
->enable
<< HCLGE_PROMISC_EN_B
);
2814 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2816 dev_err(&hdev
->pdev
->dev
,
2817 "Set promisc mode fail, status is %d.\n", ret
);
2823 void hclge_promisc_param_init(struct hclge_promisc_param
*param
, bool en_uc
,
2824 bool en_mc
, bool en_bc
, int vport_id
)
2829 memset(param
, 0, sizeof(struct hclge_promisc_param
));
2831 param
->enable
= HCLGE_PROMISC_EN_UC
;
2833 param
->enable
|= HCLGE_PROMISC_EN_MC
;
2835 param
->enable
|= HCLGE_PROMISC_EN_BC
;
2836 param
->vf_id
= vport_id
;
2839 static void hclge_set_promisc_mode(struct hnae3_handle
*handle
, u32 en
)
2841 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2842 struct hclge_dev
*hdev
= vport
->back
;
2843 struct hclge_promisc_param param
;
2845 hclge_promisc_param_init(¶m
, en
, en
, true, vport
->vport_id
);
2846 hclge_cmd_set_promisc_mode(hdev
, ¶m
);
2849 static void hclge_cfg_mac_mode(struct hclge_dev
*hdev
, bool enable
)
2851 struct hclge_desc desc
;
2852 struct hclge_config_mac_mode
*req
=
2853 (struct hclge_config_mac_mode
*)desc
.data
;
2856 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_MAC_MODE
, false);
2857 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_TX_EN_B
, enable
);
2858 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_RX_EN_B
, enable
);
2859 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_PAD_TX_B
, enable
);
2860 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_PAD_RX_B
, enable
);
2861 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_1588_TX_B
, 0);
2862 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_1588_RX_B
, 0);
2863 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_APP_LP_B
, 0);
2864 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_LINE_LP_B
, 0);
2865 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_FCS_TX_B
, enable
);
2866 hnae_set_bit(req
->txrx_pad_fcs_loop_en
, HCLGE_MAC_RX_FCS_B
, enable
);
2867 hnae_set_bit(req
->txrx_pad_fcs_loop_en
,
2868 HCLGE_MAC_RX_FCS_STRIP_B
, enable
);
2869 hnae_set_bit(req
->txrx_pad_fcs_loop_en
,
2870 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B
, enable
);
2871 hnae_set_bit(req
->txrx_pad_fcs_loop_en
,
2872 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B
, enable
);
2873 hnae_set_bit(req
->txrx_pad_fcs_loop_en
,
2874 HCLGE_MAC_TX_UNDER_MIN_ERR_B
, enable
);
2876 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2878 dev_err(&hdev
->pdev
->dev
,
2879 "mac enable fail, ret =%d.\n", ret
);
2882 static int hclge_tqp_enable(struct hclge_dev
*hdev
, int tqp_id
,
2883 int stream_id
, bool enable
)
2885 struct hclge_desc desc
;
2886 struct hclge_cfg_com_tqp_queue
*req
=
2887 (struct hclge_cfg_com_tqp_queue
*)desc
.data
;
2890 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CFG_COM_TQP_QUEUE
, false);
2891 req
->tqp_id
= cpu_to_le16(tqp_id
& HCLGE_RING_ID_MASK
);
2892 req
->stream_id
= cpu_to_le16(stream_id
);
2893 req
->enable
|= enable
<< HCLGE_TQP_ENABLE_B
;
2895 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
2897 dev_err(&hdev
->pdev
->dev
,
2898 "Tqp enable fail, status =%d.\n", ret
);
2902 static void hclge_reset_tqp_stats(struct hnae3_handle
*handle
)
2904 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2905 struct hnae3_queue
*queue
;
2906 struct hclge_tqp
*tqp
;
2909 for (i
= 0; i
< vport
->alloc_tqps
; i
++) {
2910 queue
= handle
->kinfo
.tqp
[i
];
2911 tqp
= container_of(queue
, struct hclge_tqp
, q
);
2912 memset(&tqp
->tqp_stats
, 0, sizeof(tqp
->tqp_stats
));
2916 static int hclge_ae_start(struct hnae3_handle
*handle
)
2918 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2919 struct hclge_dev
*hdev
= vport
->back
;
2920 int i
, queue_id
, ret
;
2922 for (i
= 0; i
< vport
->alloc_tqps
; i
++) {
2923 /* todo clear interrupt */
2925 queue_id
= hclge_get_queue_id(handle
->kinfo
.tqp
[i
]);
2927 dev_warn(&hdev
->pdev
->dev
,
2928 "Get invalid queue id, ignore it\n");
2932 hclge_tqp_enable(hdev
, queue_id
, 0, true);
2935 hclge_cfg_mac_mode(hdev
, true);
2936 clear_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
2937 (void)mod_timer(&hdev
->service_timer
, jiffies
+ HZ
);
2939 ret
= hclge_mac_start_phy(hdev
);
2943 /* reset tqp stats */
2944 hclge_reset_tqp_stats(handle
);
2949 static void hclge_ae_stop(struct hnae3_handle
*handle
)
2951 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2952 struct hclge_dev
*hdev
= vport
->back
;
2955 for (i
= 0; i
< vport
->alloc_tqps
; i
++) {
2957 queue_id
= hclge_get_queue_id(handle
->kinfo
.tqp
[i
]);
2959 dev_warn(&hdev
->pdev
->dev
,
2960 "Get invalid queue id, ignore it\n");
2964 hclge_tqp_enable(hdev
, queue_id
, 0, false);
2967 hclge_cfg_mac_mode(hdev
, false);
2969 hclge_mac_stop_phy(hdev
);
2971 /* reset tqp stats */
2972 hclge_reset_tqp_stats(handle
);
2975 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport
*vport
,
2976 u16 cmdq_resp
, u8 resp_code
,
2977 enum hclge_mac_vlan_tbl_opcode op
)
2979 struct hclge_dev
*hdev
= vport
->back
;
2980 int return_status
= -EIO
;
2983 dev_err(&hdev
->pdev
->dev
,
2984 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
2989 if (op
== HCLGE_MAC_VLAN_ADD
) {
2990 if ((!resp_code
) || (resp_code
== 1)) {
2992 } else if (resp_code
== 2) {
2993 return_status
= -EIO
;
2994 dev_err(&hdev
->pdev
->dev
,
2995 "add mac addr failed for uc_overflow.\n");
2996 } else if (resp_code
== 3) {
2997 return_status
= -EIO
;
2998 dev_err(&hdev
->pdev
->dev
,
2999 "add mac addr failed for mc_overflow.\n");
3001 dev_err(&hdev
->pdev
->dev
,
3002 "add mac addr failed for undefined, code=%d.\n",
3005 } else if (op
== HCLGE_MAC_VLAN_REMOVE
) {
3008 } else if (resp_code
== 1) {
3009 return_status
= -EIO
;
3010 dev_dbg(&hdev
->pdev
->dev
,
3011 "remove mac addr failed for miss.\n");
3013 dev_err(&hdev
->pdev
->dev
,
3014 "remove mac addr failed for undefined, code=%d.\n",
3017 } else if (op
== HCLGE_MAC_VLAN_LKUP
) {
3020 } else if (resp_code
== 1) {
3021 return_status
= -EIO
;
3022 dev_dbg(&hdev
->pdev
->dev
,
3023 "lookup mac addr failed for miss.\n");
3025 dev_err(&hdev
->pdev
->dev
,
3026 "lookup mac addr failed for undefined, code=%d.\n",
3030 return_status
= -EIO
;
3031 dev_err(&hdev
->pdev
->dev
,
3032 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3036 return return_status
;
3039 static int hclge_update_desc_vfid(struct hclge_desc
*desc
, int vfid
, bool clr
)
3044 if (vfid
> 255 || vfid
< 0)
3047 if (vfid
>= 0 && vfid
<= 191) {
3048 word_num
= vfid
/ 32;
3049 bit_num
= vfid
% 32;
3051 desc
[1].data
[word_num
] &= ~(1 << bit_num
);
3053 desc
[1].data
[word_num
] |= (1 << bit_num
);
3055 word_num
= (vfid
- 192) / 32;
3056 bit_num
= vfid
% 32;
3058 desc
[2].data
[word_num
] &= ~(1 << bit_num
);
3060 desc
[2].data
[word_num
] |= (1 << bit_num
);
3066 static bool hclge_is_all_function_id_zero(struct hclge_desc
*desc
)
3068 #define HCLGE_DESC_NUMBER 3
3069 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3072 for (i
= 0; i
< HCLGE_DESC_NUMBER
; i
++)
3073 for (j
= 0; j
< HCLGE_FUNC_NUMBER_PER_DESC
; j
++)
3074 if (desc
[i
].data
[j
])
3080 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry
*new_req
,
3083 const unsigned char *mac_addr
= addr
;
3084 u32 high_val
= mac_addr
[2] << 16 | (mac_addr
[3] << 24) |
3085 (mac_addr
[0]) | (mac_addr
[1] << 8);
3086 u32 low_val
= mac_addr
[4] | (mac_addr
[5] << 8);
3088 new_req
->mac_addr_hi32
= cpu_to_le32(high_val
);
3089 new_req
->mac_addr_lo16
= cpu_to_le16(low_val
& 0xffff);
3092 u16
hclge_get_mac_addr_to_mta_index(struct hclge_vport
*vport
,
3095 u16 high_val
= addr
[1] | (addr
[0] << 8);
3096 struct hclge_dev
*hdev
= vport
->back
;
3097 u32 rsh
= 4 - hdev
->mta_mac_sel_type
;
3098 u16 ret_val
= (high_val
>> rsh
) & 0xfff;
3103 static int hclge_set_mta_filter_mode(struct hclge_dev
*hdev
,
3104 enum hclge_mta_dmac_sel_type mta_mac_sel
,
3107 struct hclge_mta_filter_mode
*req
;
3108 struct hclge_desc desc
;
3111 req
= (struct hclge_mta_filter_mode
*)desc
.data
;
3112 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MTA_MAC_MODE_CFG
, false);
3114 hnae_set_bit(req
->dmac_sel_en
, HCLGE_CFG_MTA_MAC_EN_B
,
3116 hnae_set_field(req
->dmac_sel_en
, HCLGE_CFG_MTA_MAC_SEL_M
,
3117 HCLGE_CFG_MTA_MAC_SEL_S
, mta_mac_sel
);
3119 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3121 dev_err(&hdev
->pdev
->dev
,
3122 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3130 int hclge_cfg_func_mta_filter(struct hclge_dev
*hdev
,
3134 struct hclge_cfg_func_mta_filter
*req
;
3135 struct hclge_desc desc
;
3138 req
= (struct hclge_cfg_func_mta_filter
*)desc
.data
;
3139 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MTA_MAC_FUNC_CFG
, false);
3141 hnae_set_bit(req
->accept
, HCLGE_CFG_FUNC_MTA_ACCEPT_B
,
3143 req
->function_id
= func_id
;
3145 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3147 dev_err(&hdev
->pdev
->dev
,
3148 "Config func_id enable failed for cmd_send, ret =%d.\n",
3156 static int hclge_set_mta_table_item(struct hclge_vport
*vport
,
3160 struct hclge_dev
*hdev
= vport
->back
;
3161 struct hclge_cfg_func_mta_item
*req
;
3162 struct hclge_desc desc
;
3165 req
= (struct hclge_cfg_func_mta_item
*)desc
.data
;
3166 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MTA_TBL_ITEM_CFG
, false);
3167 hnae_set_bit(req
->accept
, HCLGE_CFG_MTA_ITEM_ACCEPT_B
, enable
);
3169 hnae_set_field(req
->item_idx
, HCLGE_CFG_MTA_ITEM_IDX_M
,
3170 HCLGE_CFG_MTA_ITEM_IDX_S
, idx
);
3171 req
->item_idx
= cpu_to_le16(req
->item_idx
);
3173 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3175 dev_err(&hdev
->pdev
->dev
,
3176 "Config mta table item failed for cmd_send, ret =%d.\n",
3184 static int hclge_remove_mac_vlan_tbl(struct hclge_vport
*vport
,
3185 struct hclge_mac_vlan_tbl_entry
*req
)
3187 struct hclge_dev
*hdev
= vport
->back
;
3188 struct hclge_desc desc
;
3192 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_REMOVE
, false);
3194 memcpy(desc
.data
, req
, sizeof(struct hclge_mac_vlan_tbl_entry
));
3196 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3198 dev_err(&hdev
->pdev
->dev
,
3199 "del mac addr failed for cmd_send, ret =%d.\n",
3203 resp_code
= (desc
.data
[0] >> 8) & 0xff;
3205 return hclge_get_mac_vlan_cmd_status(vport
, desc
.retval
, resp_code
,
3206 HCLGE_MAC_VLAN_REMOVE
);
3209 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport
*vport
,
3210 struct hclge_mac_vlan_tbl_entry
*req
,
3211 struct hclge_desc
*desc
,
3214 struct hclge_dev
*hdev
= vport
->back
;
3218 hclge_cmd_setup_basic_desc(&desc
[0], HCLGE_OPC_MAC_VLAN_ADD
, true);
3220 desc
[0].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
3221 memcpy(desc
[0].data
,
3223 sizeof(struct hclge_mac_vlan_tbl_entry
));
3224 hclge_cmd_setup_basic_desc(&desc
[1],
3225 HCLGE_OPC_MAC_VLAN_ADD
,
3227 desc
[1].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
3228 hclge_cmd_setup_basic_desc(&desc
[2],
3229 HCLGE_OPC_MAC_VLAN_ADD
,
3231 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 3);
3233 memcpy(desc
[0].data
,
3235 sizeof(struct hclge_mac_vlan_tbl_entry
));
3236 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 1);
3239 dev_err(&hdev
->pdev
->dev
,
3240 "lookup mac addr failed for cmd_send, ret =%d.\n",
3244 resp_code
= (desc
[0].data
[0] >> 8) & 0xff;
3246 return hclge_get_mac_vlan_cmd_status(vport
, desc
[0].retval
, resp_code
,
3247 HCLGE_MAC_VLAN_LKUP
);
3250 static int hclge_add_mac_vlan_tbl(struct hclge_vport
*vport
,
3251 struct hclge_mac_vlan_tbl_entry
*req
,
3252 struct hclge_desc
*mc_desc
)
3254 struct hclge_dev
*hdev
= vport
->back
;
3260 struct hclge_desc desc
;
3262 hclge_cmd_setup_basic_desc(&desc
,
3263 HCLGE_OPC_MAC_VLAN_ADD
,
3265 memcpy(desc
.data
, req
, sizeof(struct hclge_mac_vlan_tbl_entry
));
3266 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3267 resp_code
= (desc
.data
[0] >> 8) & 0xff;
3268 cfg_status
= hclge_get_mac_vlan_cmd_status(vport
, desc
.retval
,
3270 HCLGE_MAC_VLAN_ADD
);
3272 mc_desc
[0].flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_WR
);
3273 mc_desc
[0].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
3274 mc_desc
[1].flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_WR
);
3275 mc_desc
[1].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
3276 mc_desc
[2].flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_WR
);
3277 mc_desc
[2].flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT
);
3278 memcpy(mc_desc
[0].data
, req
,
3279 sizeof(struct hclge_mac_vlan_tbl_entry
));
3280 ret
= hclge_cmd_send(&hdev
->hw
, mc_desc
, 3);
3281 resp_code
= (mc_desc
[0].data
[0] >> 8) & 0xff;
3282 cfg_status
= hclge_get_mac_vlan_cmd_status(vport
,
3285 HCLGE_MAC_VLAN_ADD
);
3289 dev_err(&hdev
->pdev
->dev
,
3290 "add mac addr failed for cmd_send, ret =%d.\n",
3298 static int hclge_add_uc_addr(struct hnae3_handle
*handle
,
3299 const unsigned char *addr
)
3301 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3303 return hclge_add_uc_addr_common(vport
, addr
);
3306 int hclge_add_uc_addr_common(struct hclge_vport
*vport
,
3307 const unsigned char *addr
)
3309 struct hclge_dev
*hdev
= vport
->back
;
3310 struct hclge_mac_vlan_tbl_entry req
;
3311 enum hclge_cmd_status status
;
3313 /* mac addr check */
3314 if (is_zero_ether_addr(addr
) ||
3315 is_broadcast_ether_addr(addr
) ||
3316 is_multicast_ether_addr(addr
)) {
3317 dev_err(&hdev
->pdev
->dev
,
3318 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3320 is_zero_ether_addr(addr
),
3321 is_broadcast_ether_addr(addr
),
3322 is_multicast_ether_addr(addr
));
3326 memset(&req
, 0, sizeof(req
));
3327 hnae_set_bit(req
.flags
, HCLGE_MAC_VLAN_BIT0_EN_B
, 1);
3328 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3329 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT1_EN_B
, 0);
3330 hnae_set_bit(req
.mc_mac_en
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3331 hnae_set_bit(req
.egress_port
,
3332 HCLGE_MAC_EPORT_SW_EN_B
, 0);
3333 hnae_set_bit(req
.egress_port
,
3334 HCLGE_MAC_EPORT_TYPE_B
, 0);
3335 hnae_set_field(req
.egress_port
, HCLGE_MAC_EPORT_VFID_M
,
3336 HCLGE_MAC_EPORT_VFID_S
, vport
->vport_id
);
3337 hnae_set_field(req
.egress_port
, HCLGE_MAC_EPORT_PFID_M
,
3338 HCLGE_MAC_EPORT_PFID_S
, 0);
3339 req
.egress_port
= cpu_to_le16(req
.egress_port
);
3341 hclge_prepare_mac_addr(&req
, addr
);
3343 status
= hclge_add_mac_vlan_tbl(vport
, &req
, NULL
);
3348 static int hclge_rm_uc_addr(struct hnae3_handle
*handle
,
3349 const unsigned char *addr
)
3351 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3353 return hclge_rm_uc_addr_common(vport
, addr
);
3356 int hclge_rm_uc_addr_common(struct hclge_vport
*vport
,
3357 const unsigned char *addr
)
3359 struct hclge_dev
*hdev
= vport
->back
;
3360 struct hclge_mac_vlan_tbl_entry req
;
3361 enum hclge_cmd_status status
;
3363 /* mac addr check */
3364 if (is_zero_ether_addr(addr
) ||
3365 is_broadcast_ether_addr(addr
) ||
3366 is_multicast_ether_addr(addr
)) {
3367 dev_dbg(&hdev
->pdev
->dev
,
3368 "Remove mac err! invalid mac:%pM.\n",
3373 memset(&req
, 0, sizeof(req
));
3374 hnae_set_bit(req
.flags
, HCLGE_MAC_VLAN_BIT0_EN_B
, 1);
3375 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3376 hclge_prepare_mac_addr(&req
, addr
);
3377 status
= hclge_remove_mac_vlan_tbl(vport
, &req
);
3382 static int hclge_add_mc_addr(struct hnae3_handle
*handle
,
3383 const unsigned char *addr
)
3385 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3387 return hclge_add_mc_addr_common(vport
, addr
);
3390 int hclge_add_mc_addr_common(struct hclge_vport
*vport
,
3391 const unsigned char *addr
)
3393 struct hclge_dev
*hdev
= vport
->back
;
3394 struct hclge_mac_vlan_tbl_entry req
;
3395 struct hclge_desc desc
[3];
3399 /* mac addr check */
3400 if (!is_multicast_ether_addr(addr
)) {
3401 dev_err(&hdev
->pdev
->dev
,
3402 "Add mc mac err! invalid mac:%pM.\n",
3406 memset(&req
, 0, sizeof(req
));
3407 hnae_set_bit(req
.flags
, HCLGE_MAC_VLAN_BIT0_EN_B
, 1);
3408 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3409 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT1_EN_B
, 1);
3410 hnae_set_bit(req
.mc_mac_en
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3411 hclge_prepare_mac_addr(&req
, addr
);
3412 status
= hclge_lookup_mac_vlan_tbl(vport
, &req
, desc
, true);
3414 /* This mac addr exist, update VFID for it */
3415 hclge_update_desc_vfid(desc
, vport
->vport_id
, false);
3416 status
= hclge_add_mac_vlan_tbl(vport
, &req
, desc
);
3418 /* This mac addr do not exist, add new entry for it */
3419 memset(desc
[0].data
, 0, sizeof(desc
[0].data
));
3420 memset(desc
[1].data
, 0, sizeof(desc
[0].data
));
3421 memset(desc
[2].data
, 0, sizeof(desc
[0].data
));
3422 hclge_update_desc_vfid(desc
, vport
->vport_id
, false);
3423 status
= hclge_add_mac_vlan_tbl(vport
, &req
, desc
);
3426 /* Set MTA table for this MAC address */
3427 tbl_idx
= hclge_get_mac_addr_to_mta_index(vport
, addr
);
3428 status
= hclge_set_mta_table_item(vport
, tbl_idx
, true);
3433 static int hclge_rm_mc_addr(struct hnae3_handle
*handle
,
3434 const unsigned char *addr
)
3436 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3438 return hclge_rm_mc_addr_common(vport
, addr
);
3441 int hclge_rm_mc_addr_common(struct hclge_vport
*vport
,
3442 const unsigned char *addr
)
3444 struct hclge_dev
*hdev
= vport
->back
;
3445 struct hclge_mac_vlan_tbl_entry req
;
3446 enum hclge_cmd_status status
;
3447 struct hclge_desc desc
[3];
3450 /* mac addr check */
3451 if (!is_multicast_ether_addr(addr
)) {
3452 dev_dbg(&hdev
->pdev
->dev
,
3453 "Remove mc mac err! invalid mac:%pM.\n",
3458 memset(&req
, 0, sizeof(req
));
3459 hnae_set_bit(req
.flags
, HCLGE_MAC_VLAN_BIT0_EN_B
, 1);
3460 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3461 hnae_set_bit(req
.entry_type
, HCLGE_MAC_VLAN_BIT1_EN_B
, 1);
3462 hnae_set_bit(req
.mc_mac_en
, HCLGE_MAC_VLAN_BIT0_EN_B
, 0);
3463 hclge_prepare_mac_addr(&req
, addr
);
3464 status
= hclge_lookup_mac_vlan_tbl(vport
, &req
, desc
, true);
3466 /* This mac addr exist, remove this handle's VFID for it */
3467 hclge_update_desc_vfid(desc
, vport
->vport_id
, true);
3469 if (hclge_is_all_function_id_zero(desc
))
3470 /* All the vfid is zero, so need to delete this entry */
3471 status
= hclge_remove_mac_vlan_tbl(vport
, &req
);
3473 /* Not all the vfid is zero, update the vfid */
3474 status
= hclge_add_mac_vlan_tbl(vport
, &req
, desc
);
3477 /* This mac addr do not exist, can't delete it */
3478 dev_err(&hdev
->pdev
->dev
,
3479 "Rm multicast mac addr failed, ret = %d.\n",
3484 /* Set MTB table for this MAC address */
3485 tbl_idx
= hclge_get_mac_addr_to_mta_index(vport
, addr
);
3486 status
= hclge_set_mta_table_item(vport
, tbl_idx
, false);
3491 static void hclge_get_mac_addr(struct hnae3_handle
*handle
, u8
*p
)
3493 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3494 struct hclge_dev
*hdev
= vport
->back
;
3496 ether_addr_copy(p
, hdev
->hw
.mac
.mac_addr
);
3499 static int hclge_set_mac_addr(struct hnae3_handle
*handle
, void *p
)
3501 const unsigned char *new_addr
= (const unsigned char *)p
;
3502 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3503 struct hclge_dev
*hdev
= vport
->back
;
3505 /* mac addr check */
3506 if (is_zero_ether_addr(new_addr
) ||
3507 is_broadcast_ether_addr(new_addr
) ||
3508 is_multicast_ether_addr(new_addr
)) {
3509 dev_err(&hdev
->pdev
->dev
,
3510 "Change uc mac err! invalid mac:%p.\n",
3515 hclge_rm_uc_addr(handle
, hdev
->hw
.mac
.mac_addr
);
3517 if (!hclge_add_uc_addr(handle
, new_addr
)) {
3518 ether_addr_copy(hdev
->hw
.mac
.mac_addr
, new_addr
);
3525 static int hclge_set_vlan_filter_ctrl(struct hclge_dev
*hdev
, u8 vlan_type
,
3528 struct hclge_vlan_filter_ctrl
*req
;
3529 struct hclge_desc desc
;
3532 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_VLAN_FILTER_CTRL
, false);
3534 req
= (struct hclge_vlan_filter_ctrl
*)desc
.data
;
3535 req
->vlan_type
= vlan_type
;
3536 req
->vlan_fe
= filter_en
;
3538 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3540 dev_err(&hdev
->pdev
->dev
, "set vlan filter fail, ret =%d.\n",
3548 int hclge_set_vf_vlan_common(struct hclge_dev
*hdev
, int vfid
,
3549 bool is_kill
, u16 vlan
, u8 qos
, __be16 proto
)
3551 #define HCLGE_MAX_VF_BYTES 16
3552 struct hclge_vlan_filter_vf_cfg
*req0
;
3553 struct hclge_vlan_filter_vf_cfg
*req1
;
3554 struct hclge_desc desc
[2];
3559 hclge_cmd_setup_basic_desc(&desc
[0],
3560 HCLGE_OPC_VLAN_FILTER_VF_CFG
, false);
3561 hclge_cmd_setup_basic_desc(&desc
[1],
3562 HCLGE_OPC_VLAN_FILTER_VF_CFG
, false);
3564 desc
[0].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
3566 vf_byte_off
= vfid
/ 8;
3567 vf_byte_val
= 1 << (vfid
% 8);
3569 req0
= (struct hclge_vlan_filter_vf_cfg
*)desc
[0].data
;
3570 req1
= (struct hclge_vlan_filter_vf_cfg
*)desc
[1].data
;
3572 req0
->vlan_id
= vlan
;
3573 req0
->vlan_cfg
= is_kill
;
3575 if (vf_byte_off
< HCLGE_MAX_VF_BYTES
)
3576 req0
->vf_bitmap
[vf_byte_off
] = vf_byte_val
;
3578 req1
->vf_bitmap
[vf_byte_off
- HCLGE_MAX_VF_BYTES
] = vf_byte_val
;
3580 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
3582 dev_err(&hdev
->pdev
->dev
,
3583 "Send vf vlan command fail, ret =%d.\n",
3589 if (!req0
->resp_code
|| req0
->resp_code
== 1)
3592 dev_err(&hdev
->pdev
->dev
,
3593 "Add vf vlan filter fail, ret =%d.\n",
3596 if (!req0
->resp_code
)
3599 dev_err(&hdev
->pdev
->dev
,
3600 "Kill vf vlan filter fail, ret =%d.\n",
3607 static int hclge_set_port_vlan_filter(struct hnae3_handle
*handle
,
3608 __be16 proto
, u16 vlan_id
,
3611 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3612 struct hclge_dev
*hdev
= vport
->back
;
3613 struct hclge_vlan_filter_pf_cfg
*req
;
3614 struct hclge_desc desc
;
3615 u8 vlan_offset_byte_val
;
3616 u8 vlan_offset_byte
;
3620 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_VLAN_FILTER_PF_CFG
, false);
3622 vlan_offset_160
= vlan_id
/ 160;
3623 vlan_offset_byte
= (vlan_id
% 160) / 8;
3624 vlan_offset_byte_val
= 1 << (vlan_id
% 8);
3626 req
= (struct hclge_vlan_filter_pf_cfg
*)desc
.data
;
3627 req
->vlan_offset
= vlan_offset_160
;
3628 req
->vlan_cfg
= is_kill
;
3629 req
->vlan_offset_bitmap
[vlan_offset_byte
] = vlan_offset_byte_val
;
3631 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3633 dev_err(&hdev
->pdev
->dev
,
3634 "port vlan command, send fail, ret =%d.\n",
3639 ret
= hclge_set_vf_vlan_common(hdev
, 0, is_kill
, vlan_id
, 0, proto
);
3641 dev_err(&hdev
->pdev
->dev
,
3642 "Set pf vlan filter config fail, ret =%d.\n",
3650 static int hclge_set_vf_vlan_filter(struct hnae3_handle
*handle
, int vfid
,
3651 u16 vlan
, u8 qos
, __be16 proto
)
3653 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3654 struct hclge_dev
*hdev
= vport
->back
;
3656 if ((vfid
>= hdev
->num_alloc_vfs
) || (vlan
> 4095) || (qos
> 7))
3658 if (proto
!= htons(ETH_P_8021Q
))
3659 return -EPROTONOSUPPORT
;
3661 return hclge_set_vf_vlan_common(hdev
, vfid
, false, vlan
, qos
, proto
);
3664 static int hclge_init_vlan_config(struct hclge_dev
*hdev
)
3666 #define HCLGE_VLAN_TYPE_VF_TABLE 0
3667 #define HCLGE_VLAN_TYPE_PORT_TABLE 1
3670 ret
= hclge_set_vlan_filter_ctrl(hdev
, HCLGE_VLAN_TYPE_VF_TABLE
,
3675 ret
= hclge_set_vlan_filter_ctrl(hdev
, HCLGE_VLAN_TYPE_PORT_TABLE
,
3681 static int hclge_set_mtu(struct hnae3_handle
*handle
, int new_mtu
)
3683 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3684 struct hclge_config_max_frm_size
*req
;
3685 struct hclge_dev
*hdev
= vport
->back
;
3686 struct hclge_desc desc
;
3689 if ((new_mtu
< HCLGE_MAC_MIN_MTU
) || (new_mtu
> HCLGE_MAC_MAX_MTU
))
3692 hdev
->mps
= new_mtu
;
3693 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CONFIG_MAX_FRM_SIZE
, false);
3695 req
= (struct hclge_config_max_frm_size
*)desc
.data
;
3696 req
->max_frm_size
= cpu_to_le16(new_mtu
);
3698 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3700 dev_err(&hdev
->pdev
->dev
, "set mtu fail, ret =%d.\n", ret
);
3707 static int hclge_send_reset_tqp_cmd(struct hclge_dev
*hdev
, u16 queue_id
,
3710 struct hclge_reset_tqp_queue
*req
;
3711 struct hclge_desc desc
;
3714 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RESET_TQP_QUEUE
, false);
3716 req
= (struct hclge_reset_tqp_queue
*)desc
.data
;
3717 req
->tqp_id
= cpu_to_le16(queue_id
& HCLGE_RING_ID_MASK
);
3718 hnae_set_bit(req
->reset_req
, HCLGE_TQP_RESET_B
, enable
);
3720 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3722 dev_err(&hdev
->pdev
->dev
,
3723 "Send tqp reset cmd error, status =%d\n", ret
);
3730 static int hclge_get_reset_status(struct hclge_dev
*hdev
, u16 queue_id
)
3732 struct hclge_reset_tqp_queue
*req
;
3733 struct hclge_desc desc
;
3736 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_RESET_TQP_QUEUE
, true);
3738 req
= (struct hclge_reset_tqp_queue
*)desc
.data
;
3739 req
->tqp_id
= cpu_to_le16(queue_id
& HCLGE_RING_ID_MASK
);
3741 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3743 dev_err(&hdev
->pdev
->dev
,
3744 "Get reset status error, status =%d\n", ret
);
3748 return hnae_get_bit(req
->ready_to_reset
, HCLGE_TQP_RESET_B
);
3751 static void hclge_reset_tqp(struct hnae3_handle
*handle
, u16 queue_id
)
3753 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3754 struct hclge_dev
*hdev
= vport
->back
;
3755 int reset_try_times
= 0;
3759 ret
= hclge_tqp_enable(hdev
, queue_id
, 0, false);
3761 dev_warn(&hdev
->pdev
->dev
, "Disable tqp fail, ret = %d\n", ret
);
3765 ret
= hclge_send_reset_tqp_cmd(hdev
, queue_id
, true);
3767 dev_warn(&hdev
->pdev
->dev
,
3768 "Send reset tqp cmd fail, ret = %d\n", ret
);
3772 reset_try_times
= 0;
3773 while (reset_try_times
++ < HCLGE_TQP_RESET_TRY_TIMES
) {
3774 /* Wait for tqp hw reset */
3776 reset_status
= hclge_get_reset_status(hdev
, queue_id
);
3781 if (reset_try_times
>= HCLGE_TQP_RESET_TRY_TIMES
) {
3782 dev_warn(&hdev
->pdev
->dev
, "Reset TQP fail\n");
3786 ret
= hclge_send_reset_tqp_cmd(hdev
, queue_id
, false);
3788 dev_warn(&hdev
->pdev
->dev
,
3789 "Deassert the soft reset fail, ret = %d\n", ret
);
3794 static u32
hclge_get_fw_version(struct hnae3_handle
*handle
)
3796 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3797 struct hclge_dev
*hdev
= vport
->back
;
3799 return hdev
->fw_version
;
3802 static void hclge_get_pauseparam(struct hnae3_handle
*handle
, u32
*auto_neg
,
3803 u32
*rx_en
, u32
*tx_en
)
3805 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3806 struct hclge_dev
*hdev
= vport
->back
;
3808 *auto_neg
= hclge_get_autoneg(handle
);
3810 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_PFC
) {
3816 if (hdev
->tm_info
.fc_mode
== HCLGE_FC_RX_PAUSE
) {
3819 } else if (hdev
->tm_info
.fc_mode
== HCLGE_FC_TX_PAUSE
) {
3822 } else if (hdev
->tm_info
.fc_mode
== HCLGE_FC_FULL
) {
3831 static void hclge_get_ksettings_an_result(struct hnae3_handle
*handle
,
3832 u8
*auto_neg
, u32
*speed
, u8
*duplex
)
3834 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3835 struct hclge_dev
*hdev
= vport
->back
;
3838 *speed
= hdev
->hw
.mac
.speed
;
3840 *duplex
= hdev
->hw
.mac
.duplex
;
3842 *auto_neg
= hdev
->hw
.mac
.autoneg
;
3845 static void hclge_get_media_type(struct hnae3_handle
*handle
, u8
*media_type
)
3847 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3848 struct hclge_dev
*hdev
= vport
->back
;
3851 *media_type
= hdev
->hw
.mac
.media_type
;
3854 static void hclge_get_mdix_mode(struct hnae3_handle
*handle
,
3855 u8
*tp_mdix_ctrl
, u8
*tp_mdix
)
3857 struct hclge_vport
*vport
= hclge_get_vport(handle
);
3858 struct hclge_dev
*hdev
= vport
->back
;
3859 struct phy_device
*phydev
= hdev
->hw
.mac
.phydev
;
3860 int mdix_ctrl
, mdix
, retval
, is_resolved
;
3863 *tp_mdix_ctrl
= ETH_TP_MDI_INVALID
;
3864 *tp_mdix
= ETH_TP_MDI_INVALID
;
3868 phy_write(phydev
, HCLGE_PHY_PAGE_REG
, HCLGE_PHY_PAGE_MDIX
);
3870 retval
= phy_read(phydev
, HCLGE_PHY_CSC_REG
);
3871 mdix_ctrl
= hnae_get_field(retval
, HCLGE_PHY_MDIX_CTRL_M
,
3872 HCLGE_PHY_MDIX_CTRL_S
);
3874 retval
= phy_read(phydev
, HCLGE_PHY_CSS_REG
);
3875 mdix
= hnae_get_bit(retval
, HCLGE_PHY_MDIX_STATUS_B
);
3876 is_resolved
= hnae_get_bit(retval
, HCLGE_PHY_SPEED_DUP_RESOLVE_B
);
3878 phy_write(phydev
, HCLGE_PHY_PAGE_REG
, HCLGE_PHY_PAGE_COPPER
);
3880 switch (mdix_ctrl
) {
3882 *tp_mdix_ctrl
= ETH_TP_MDI
;
3885 *tp_mdix_ctrl
= ETH_TP_MDI_X
;
3888 *tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
3891 *tp_mdix_ctrl
= ETH_TP_MDI_INVALID
;
3896 *tp_mdix
= ETH_TP_MDI_INVALID
;
3898 *tp_mdix
= ETH_TP_MDI_X
;
3900 *tp_mdix
= ETH_TP_MDI
;
3903 static int hclge_init_client_instance(struct hnae3_client
*client
,
3904 struct hnae3_ae_dev
*ae_dev
)
3906 struct hclge_dev
*hdev
= ae_dev
->priv
;
3907 struct hclge_vport
*vport
;
3910 for (i
= 0; i
< hdev
->num_vmdq_vport
+ 1; i
++) {
3911 vport
= &hdev
->vport
[i
];
3913 switch (client
->type
) {
3914 case HNAE3_CLIENT_KNIC
:
3916 hdev
->nic_client
= client
;
3917 vport
->nic
.client
= client
;
3918 ret
= client
->ops
->init_instance(&vport
->nic
);
3922 if (hdev
->roce_client
&&
3923 hnae_get_bit(hdev
->ae_dev
->flag
,
3924 HNAE_DEV_SUPPORT_ROCE_B
)) {
3925 struct hnae3_client
*rc
= hdev
->roce_client
;
3927 ret
= hclge_init_roce_base_info(vport
);
3931 ret
= rc
->ops
->init_instance(&vport
->roce
);
3937 case HNAE3_CLIENT_UNIC
:
3938 hdev
->nic_client
= client
;
3939 vport
->nic
.client
= client
;
3941 ret
= client
->ops
->init_instance(&vport
->nic
);
3946 case HNAE3_CLIENT_ROCE
:
3947 if (hnae_get_bit(hdev
->ae_dev
->flag
,
3948 HNAE_DEV_SUPPORT_ROCE_B
)) {
3949 hdev
->roce_client
= client
;
3950 vport
->roce
.client
= client
;
3953 if (hdev
->roce_client
) {
3954 ret
= hclge_init_roce_base_info(vport
);
3958 ret
= client
->ops
->init_instance(&vport
->roce
);
3970 static void hclge_uninit_client_instance(struct hnae3_client
*client
,
3971 struct hnae3_ae_dev
*ae_dev
)
3973 struct hclge_dev
*hdev
= ae_dev
->priv
;
3974 struct hclge_vport
*vport
;
3977 for (i
= 0; i
< hdev
->num_vmdq_vport
+ 1; i
++) {
3978 vport
= &hdev
->vport
[i
];
3979 if (hdev
->roce_client
)
3980 hdev
->roce_client
->ops
->uninit_instance(&vport
->roce
,
3982 if (client
->type
== HNAE3_CLIENT_ROCE
)
3984 if (client
->ops
->uninit_instance
)
3985 client
->ops
->uninit_instance(&vport
->nic
, 0);
3989 static int hclge_pci_init(struct hclge_dev
*hdev
)
3991 struct pci_dev
*pdev
= hdev
->pdev
;
3992 struct hclge_hw
*hw
;
3995 ret
= pci_enable_device(pdev
);
3997 dev_err(&pdev
->dev
, "failed to enable PCI device\n");
3998 goto err_no_drvdata
;
4001 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
4003 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
4006 "can't set consistent PCI DMA");
4007 goto err_disable_device
;
4009 dev_warn(&pdev
->dev
, "set DMA mask to 32 bits\n");
4012 ret
= pci_request_regions(pdev
, HCLGE_DRIVER_NAME
);
4014 dev_err(&pdev
->dev
, "PCI request regions failed %d\n", ret
);
4015 goto err_disable_device
;
4018 pci_set_master(pdev
);
4021 hw
->io_base
= pcim_iomap(pdev
, 2, 0);
4023 dev_err(&pdev
->dev
, "Can't map configuration register space\n");
4025 goto err_clr_master
;
4030 pci_clear_master(pdev
);
4031 pci_release_regions(pdev
);
4033 pci_disable_device(pdev
);
4035 pci_set_drvdata(pdev
, NULL
);
4040 static void hclge_pci_uninit(struct hclge_dev
*hdev
)
4042 struct pci_dev
*pdev
= hdev
->pdev
;
4044 if (hdev
->flag
& HCLGE_FLAG_USE_MSIX
) {
4045 pci_disable_msix(pdev
);
4046 devm_kfree(&pdev
->dev
, hdev
->msix_entries
);
4047 hdev
->msix_entries
= NULL
;
4049 pci_disable_msi(pdev
);
4052 pci_clear_master(pdev
);
4053 pci_release_mem_regions(pdev
);
4054 pci_disable_device(pdev
);
4057 static int hclge_init_ae_dev(struct hnae3_ae_dev
*ae_dev
)
4059 struct pci_dev
*pdev
= ae_dev
->pdev
;
4060 const struct pci_device_id
*id
;
4061 struct hclge_dev
*hdev
;
4064 hdev
= devm_kzalloc(&pdev
->dev
, sizeof(*hdev
), GFP_KERNEL
);
4070 hdev
->flag
|= HCLGE_FLAG_USE_MSIX
;
4072 hdev
->ae_dev
= ae_dev
;
4073 ae_dev
->priv
= hdev
;
4075 id
= pci_match_id(roce_pci_tbl
, ae_dev
->pdev
);
4077 hnae_set_bit(ae_dev
->flag
, HNAE_DEV_SUPPORT_ROCE_B
, 1);
4079 ret
= hclge_pci_init(hdev
);
4081 dev_err(&pdev
->dev
, "PCI init failed\n");
4085 /* Command queue initialize */
4086 ret
= hclge_cmd_init(hdev
);
4090 ret
= hclge_get_cap(hdev
);
4092 dev_err(&pdev
->dev
, "get hw capability error, ret = %d.\n",
4097 ret
= hclge_configure(hdev
);
4099 dev_err(&pdev
->dev
, "Configure dev error, ret = %d.\n", ret
);
4103 if (hdev
->flag
& HCLGE_FLAG_USE_MSIX
)
4104 ret
= hclge_init_msix(hdev
);
4106 ret
= hclge_init_msi(hdev
);
4108 dev_err(&pdev
->dev
, "Init msix/msi error, ret = %d.\n", ret
);
4112 ret
= hclge_alloc_tqps(hdev
);
4114 dev_err(&pdev
->dev
, "Allocate TQPs error, ret = %d.\n", ret
);
4118 ret
= hclge_alloc_vport(hdev
);
4120 dev_err(&pdev
->dev
, "Allocate vport error, ret = %d.\n", ret
);
4124 ret
= hclge_mac_init(hdev
);
4126 dev_err(&pdev
->dev
, "Mac init error, ret = %d\n", ret
);
4129 ret
= hclge_buffer_alloc(hdev
);
4131 dev_err(&pdev
->dev
, "Buffer allocate fail, ret =%d\n", ret
);
4135 ret
= hclge_config_tso(hdev
, HCLGE_TSO_MSS_MIN
, HCLGE_TSO_MSS_MAX
);
4137 dev_err(&pdev
->dev
, "Enable tso fail, ret =%d\n", ret
);
4141 ret
= hclge_rss_init_hw(hdev
);
4143 dev_err(&pdev
->dev
, "Rss init fail, ret =%d\n", ret
);
4147 ret
= hclge_init_vlan_config(hdev
);
4149 dev_err(&pdev
->dev
, "VLAN init fail, ret =%d\n", ret
);
4153 ret
= hclge_tm_schd_init(hdev
);
4155 dev_err(&pdev
->dev
, "tm schd init fail, ret =%d\n", ret
);
4159 setup_timer(&hdev
->service_timer
, hclge_service_timer
,
4160 (unsigned long)hdev
);
4161 INIT_WORK(&hdev
->service_task
, hclge_service_task
);
4163 set_bit(HCLGE_STATE_SERVICE_INITED
, &hdev
->state
);
4164 set_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
4166 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME
);
4170 pci_release_regions(pdev
);
4172 pci_set_drvdata(pdev
, NULL
);
4177 static void hclge_uninit_ae_dev(struct hnae3_ae_dev
*ae_dev
)
4179 struct hclge_dev
*hdev
= ae_dev
->priv
;
4180 struct hclge_mac
*mac
= &hdev
->hw
.mac
;
4182 set_bit(HCLGE_STATE_DOWN
, &hdev
->state
);
4184 if (IS_ENABLED(CONFIG_PCI_IOV
))
4185 hclge_disable_sriov(hdev
);
4187 if (hdev
->service_timer
.data
)
4188 del_timer_sync(&hdev
->service_timer
);
4189 if (hdev
->service_task
.func
)
4190 cancel_work_sync(&hdev
->service_task
);
4193 mdiobus_unregister(mac
->mdio_bus
);
4195 hclge_destroy_cmd_queue(&hdev
->hw
);
4196 hclge_pci_uninit(hdev
);
4197 ae_dev
->priv
= NULL
;
4200 static const struct hnae3_ae_ops hclge_ops
= {
4201 .init_ae_dev
= hclge_init_ae_dev
,
4202 .uninit_ae_dev
= hclge_uninit_ae_dev
,
4203 .init_client_instance
= hclge_init_client_instance
,
4204 .uninit_client_instance
= hclge_uninit_client_instance
,
4205 .map_ring_to_vector
= hclge_map_handle_ring_to_vector
,
4206 .unmap_ring_from_vector
= hclge_unmap_ring_from_vector
,
4207 .get_vector
= hclge_get_vector
,
4208 .set_promisc_mode
= hclge_set_promisc_mode
,
4209 .start
= hclge_ae_start
,
4210 .stop
= hclge_ae_stop
,
4211 .get_status
= hclge_get_status
,
4212 .get_ksettings_an_result
= hclge_get_ksettings_an_result
,
4213 .update_speed_duplex_h
= hclge_update_speed_duplex_h
,
4214 .cfg_mac_speed_dup_h
= hclge_cfg_mac_speed_dup_h
,
4215 .get_media_type
= hclge_get_media_type
,
4216 .get_rss_key_size
= hclge_get_rss_key_size
,
4217 .get_rss_indir_size
= hclge_get_rss_indir_size
,
4218 .get_rss
= hclge_get_rss
,
4219 .set_rss
= hclge_set_rss
,
4220 .get_tc_size
= hclge_get_tc_size
,
4221 .get_mac_addr
= hclge_get_mac_addr
,
4222 .set_mac_addr
= hclge_set_mac_addr
,
4223 .add_uc_addr
= hclge_add_uc_addr
,
4224 .rm_uc_addr
= hclge_rm_uc_addr
,
4225 .add_mc_addr
= hclge_add_mc_addr
,
4226 .rm_mc_addr
= hclge_rm_mc_addr
,
4227 .set_autoneg
= hclge_set_autoneg
,
4228 .get_autoneg
= hclge_get_autoneg
,
4229 .get_pauseparam
= hclge_get_pauseparam
,
4230 .set_mtu
= hclge_set_mtu
,
4231 .reset_queue
= hclge_reset_tqp
,
4232 .get_stats
= hclge_get_stats
,
4233 .update_stats
= hclge_update_stats
,
4234 .get_strings
= hclge_get_strings
,
4235 .get_sset_count
= hclge_get_sset_count
,
4236 .get_fw_version
= hclge_get_fw_version
,
4237 .get_mdix_mode
= hclge_get_mdix_mode
,
4238 .set_vlan_filter
= hclge_set_port_vlan_filter
,
4239 .set_vf_vlan_filter
= hclge_set_vf_vlan_filter
,
4242 static struct hnae3_ae_algo ae_algo
= {
4245 .pdev_id_table
= ae_algo_pci_tbl
,
4248 static int hclge_init(void)
4250 pr_info("%s is initializing\n", HCLGE_NAME
);
4252 return hnae3_register_ae_algo(&ae_algo
);
4255 static void hclge_exit(void)
4257 hnae3_unregister_ae_algo(&ae_algo
);
4259 module_init(hclge_init
);
4260 module_exit(hclge_exit
);
4262 MODULE_LICENSE("GPL");
4263 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4264 MODULE_DESCRIPTION("HCLGE Driver");
4265 MODULE_VERSION(HCLGE_MOD_VERSION
);