drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

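/* Each entry in the stats tables below pairs an ethtool string with the
 * byte offset of the matching counter in its stats structure; the
 * HCLGE_STATS_READ() macro above fetches the u64 value at that offset
 * when ethtool statistics are collected.
 */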
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
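	/* The stats come back in HCLGE_64_BIT_CMD_NUM descriptors. The
	 * first descriptor still carries the command header, so only
	 * DATANUM - 1 u64 counters are read from its data words; the
	 * remaining descriptors are pure data and supply DATANUM each.
	 */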
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

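	/* In the first returned descriptor the hardware packs one 32-bit
	 * error counter and two 16-bit counters ahead of the regular
	 * 32-bit values, so it is parsed separately below and contributes
	 * four data words fewer than the other descriptors.
	 */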
	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

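	/* As with the other stats commands, the first descriptor carries
	 * the command header, so only two u64 MAC counters are parsed from
	 * it; the remaining descriptors each supply four.
	 */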
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index & 0x1ff);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index & 0x1ff);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP exports one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
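	/* Mask covering the three HNAE3_SUPPORT_*_LOOPBACK flag bits
	 * (MAC, SERDES and PHY) that are recalculated below.
	 */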

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear the loopback bit flags first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Record whether this PF is the main PF */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->num_req_vfs = status->vf_num / status->pf_num;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msix =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* The PF owns both NIC and RoCE vectors; the NIC vectors
		 * are laid out before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

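/* Map the speed code reported in the firmware configuration to the
 * corresponding HCLGE_MAC_SPEED_* value.
 */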
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

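	/* merge the high 16 bits of the MAC address above the low 32 bits */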
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}

/* hclge_get_cfg: query static configuration parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length must be in units of 4 bytes when sent to
		 * the hardware.
		 */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = 1;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Device does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
	else
		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced, func_id, ret;
	bool is_pf;

	func_id = vport->vport_id;
	is_pf = (vport->vport_id == 0);

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			ret = hclge_map_tqps_to_func(hdev, func_id,
						     hdev->htqp[i].index,
						     alloced, is_pf);
			if (ret)
				return ret;

			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

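	/* Carve the TQPs up per TC: each enabled TC gets rss_size queues,
	 * laid out back to back at offset i * rss_size.
	 */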
	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to the default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
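	/* The main vport (the PF itself) absorbs the remainder, so every
	 * available TQP ends up assigned to some vport.
	 */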

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of PFC-enabled TCs that have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of PFC-disabled TCs that have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

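/* Check whether the rx buffer left over after the private allocations
 * (rx_all minus the per-TC private buffers) is large enough for the
 * shared buffer; if so, record the shared buffer size and program its
 * per-TC high/low thresholds.
 */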
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the private buffer size of
	 * the TCs that do not have pfc enabled
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs
	 * that have a private buffer
	 */
	/* get the number of TCs without pfc that have a private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* clear from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear this non-pfc TC's private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * that have a private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* clear from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Clear this pfc TC's private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Allocate the private buffers for the TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

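/* A watermark or threshold of zero means "disabled"; this macro derives
 * the enable bit that is programmed alongside each value below.
 */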
#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}
	return 0;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}
	return 0;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

1915 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1916 {
1917 struct hnae3_handle *roce = &vport->roce;
1918 struct hnae3_handle *nic = &vport->nic;
1919
1920 roce->rinfo.num_vectors = vport->back->num_roce_msix;
1921
1922 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1923 vport->back->num_msi_left == 0)
1924 return -EINVAL;
1925
1926 roce->rinfo.base_vector = vport->back->roce_base_vector;
1927
1928 roce->rinfo.netdev = nic->kinfo.netdev;
1929 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1930
1931 roce->pdev = nic->pdev;
1932 roce->ae_algo = nic->ae_algo;
1933 roce->numa_node_mask = nic->numa_node_mask;
1934
1935 return 0;
1936 }
1937
1938 static int hclge_init_msix(struct hclge_dev *hdev)
1939 {
1940 struct pci_dev *pdev = hdev->pdev;
1941 int ret, i;
1942
1943 hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
1944 sizeof(struct msix_entry),
1945 GFP_KERNEL);
1946 if (!hdev->msix_entries)
1947 return -ENOMEM;
1948
1949 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1950 sizeof(u16), GFP_KERNEL);
1951 if (!hdev->vector_status)
1952 return -ENOMEM;
1953
1954 for (i = 0; i < hdev->num_msi; i++) {
1955 hdev->msix_entries[i].entry = i;
1956 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1957 }
1958
1959 hdev->num_msi_left = hdev->num_msi;
1960 hdev->base_msi_vector = hdev->pdev->irq;
1961 hdev->roce_base_vector = hdev->base_msi_vector +
1962 HCLGE_ROCE_VECTOR_OFFSET;
1963
1964 ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
1965 hdev->num_msi, hdev->num_msi);
1966 if (ret < 0) {
1967 dev_err(&hdev->pdev->dev,
1968 "MSI-X vector alloc failed: %d\n", ret);
1969 return ret;
1970 }
1971
1972 return 0;
1973 }
1974
1975 static int hclge_init_msi(struct hclge_dev *hdev)
1976 {
1977 struct pci_dev *pdev = hdev->pdev;
1978 int vectors;
1979 int i;
1980
1981 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1982 sizeof(u16), GFP_KERNEL);
1983 if (!hdev->vector_status)
1984 return -ENOMEM;
1985
1986 for (i = 0; i < hdev->num_msi; i++)
1987 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1988
1989 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
1990 if (vectors < 0) {
1991 dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
1992 return -EINVAL;
1993 }
1994 hdev->num_msi = vectors;
1995 hdev->num_msi_left = vectors;
1996 hdev->base_msi_vector = pdev->irq;
1997 hdev->roce_base_vector = hdev->base_msi_vector +
1998 HCLGE_ROCE_VECTOR_OFFSET;
1999
2000 return 0;
2001 }
2002
2003 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2004 {
2005 struct hclge_mac *mac = &hdev->hw.mac;
2006
2007 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2008 mac->duplex = (u8)duplex;
2009 else
2010 mac->duplex = HCLGE_MAC_FULL;
2011
2012 mac->speed = speed;
2013 }
2014
2015 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2016 {
2017 struct hclge_config_mac_speed_dup_cmd *req;
2018 struct hclge_desc desc;
2019 int ret;
2020
2021 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2022
2023 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2024
2025 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2026
2027 switch (speed) {
2028 case HCLGE_MAC_SPEED_10M:
2029 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2030 HCLGE_CFG_SPEED_S, 6);
2031 break;
2032 case HCLGE_MAC_SPEED_100M:
2033 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2034 HCLGE_CFG_SPEED_S, 7);
2035 break;
2036 case HCLGE_MAC_SPEED_1G:
2037 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2038 HCLGE_CFG_SPEED_S, 0);
2039 break;
2040 case HCLGE_MAC_SPEED_10G:
2041 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2042 HCLGE_CFG_SPEED_S, 1);
2043 break;
2044 case HCLGE_MAC_SPEED_25G:
2045 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2046 HCLGE_CFG_SPEED_S, 2);
2047 break;
2048 case HCLGE_MAC_SPEED_40G:
2049 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2050 HCLGE_CFG_SPEED_S, 3);
2051 break;
2052 case HCLGE_MAC_SPEED_50G:
2053 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2054 HCLGE_CFG_SPEED_S, 4);
2055 break;
2056 case HCLGE_MAC_SPEED_100G:
2057 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2058 HCLGE_CFG_SPEED_S, 5);
2059 break;
2060 default:
2061 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2062 return -EINVAL;
2063 }
2064
2065 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2066 1);
2067
2068 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2069 if (ret) {
2070 dev_err(&hdev->pdev->dev,
2071 "mac speed/duplex config cmd failed %d.\n", ret);
2072 return ret;
2073 }
2074
2075 hclge_check_speed_dup(hdev, duplex, speed);
2076
2077 return 0;
2078 }
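
/* Illustrative sketch (not part of this file): the switch above maps MAC
 * speeds to firmware speed codes (1G->0, 10G->1, 25G->2, 40G->3, 50G->4,
 * 100G->5, 10M->6, 100M->7). A table-driven equivalent, with speeds given
 * as plain Mbit/s values instead of the HCLGE_MAC_SPEED_* enums:
 */
#include <stdint.h>

static int demo_speed_to_fw_code(uint32_t mbps, uint8_t *code)
{
 static const struct { uint32_t mbps; uint8_t code; } map[] = {
  { 1000, 0 }, { 10000, 1 }, { 25000, 2 }, { 40000, 3 },
  { 50000, 4 }, { 100000, 5 }, { 10, 6 }, { 100, 7 },
 };
 unsigned int i;

 for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
  if (map[i].mbps == mbps) {
   *code = map[i].code;
   return 0;
  }
 }
 return -1; /* unknown speed; mirrors the -EINVAL path above */
}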
2079
2080 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2081 u8 duplex)
2082 {
2083 struct hclge_vport *vport = hclge_get_vport(handle);
2084 struct hclge_dev *hdev = vport->back;
2085
2086 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2087 }
2088
2089 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2090 u8 *duplex)
2091 {
2092 struct hclge_query_an_speed_dup_cmd *req;
2093 struct hclge_desc desc;
2094 int speed_tmp;
2095 int ret;
2096
2097 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2098
2099 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2100 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2101 if (ret) {
2102 dev_err(&hdev->pdev->dev,
2103 "mac speed/autoneg/duplex query cmd failed %d\n",
2104 ret);
2105 return ret;
2106 }
2107
2108 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2109 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2110 HCLGE_QUERY_SPEED_S);
2111
2112 ret = hclge_parse_speed(speed_tmp, speed);
2113 if (ret) {
2114 dev_err(&hdev->pdev->dev,
2115 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2116 return -EIO;
2117 }
2118
2119 return 0;
2120 }
2121
2122 static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2123 {
2124 struct hclge_mac *mac = &hdev->hw.mac;
2125 struct hclge_query_an_speed_dup_cmd *req;
2126 struct hclge_desc desc;
2127 int ret;
2128
2129 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2130
2131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2132 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2133 if (ret) {
2134 dev_err(&hdev->pdev->dev,
2135 "autoneg result query cmd failed %d.\n", ret);
2136 return ret;
2137 }
2138
2139 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2140
2141 return 0;
2142 }
2143
2144 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2145 {
2146 struct hclge_config_auto_neg_cmd *req;
2147 struct hclge_desc desc;
2148 u32 flag = 0;
2149 int ret;
2150
2151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2152
2153 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2154 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2155 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2156
2157 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2158 if (ret) {
2159 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2160 ret);
2161 return ret;
2162 }
2163
2164 return 0;
2165 }
2166
2167 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2168 {
2169 struct hclge_vport *vport = hclge_get_vport(handle);
2170 struct hclge_dev *hdev = vport->back;
2171
2172 return hclge_set_autoneg_en(hdev, enable);
2173 }
2174
2175 static int hclge_get_autoneg(struct hnae3_handle *handle)
2176 {
2177 struct hclge_vport *vport = hclge_get_vport(handle);
2178 struct hclge_dev *hdev = vport->back;
2179
2180 hclge_query_autoneg_result(hdev);
2181
2182 return hdev->hw.mac.autoneg;
2183 }
2184
2185 static int hclge_mac_init(struct hclge_dev *hdev)
2186 {
2187 struct hclge_mac *mac = &hdev->hw.mac;
2188 int ret;
2189
2190 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2191 if (ret) {
2192 dev_err(&hdev->pdev->dev,
2193 "Config mac speed dup fail ret=%d\n", ret);
2194 return ret;
2195 }
2196
2197 mac->link = 0;
2198
2199 ret = hclge_mac_mdio_config(hdev);
2200 if (ret) {
2201 dev_warn(&hdev->pdev->dev,
2202 "mdio config fail ret=%d\n", ret);
2203 return ret;
2204 }
2205
2206 /* Initialize the MTA table work mode */
2207 hdev->accept_mta_mc = true;
2208 hdev->enable_mta = true;
2209 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2210
2211 ret = hclge_set_mta_filter_mode(hdev,
2212 hdev->mta_mac_sel_type,
2213 hdev->enable_mta);
2214 if (ret) {
2215 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2216 ret);
2217 return ret;
2218 }
2219
2220 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2221 }
2222
2223 static void hclge_task_schedule(struct hclge_dev *hdev)
2224 {
2225 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2226 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2227 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2228 (void)schedule_work(&hdev->service_task);
2229 }
2230
2231 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2232 {
2233 struct hclge_link_status_cmd *req;
2234 struct hclge_desc desc;
2235 int link_status;
2236 int ret;
2237
2238 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2239 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2240 if (ret) {
2241 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2242 ret);
2243 return ret;
2244 }
2245
2246 req = (struct hclge_link_status_cmd *)desc.data;
2247 link_status = req->status & HCLGE_LINK_STATUS;
2248
2249 return !!link_status;
2250 }
2251
2252 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2253 {
2254 int mac_state;
2255 int link_stat;
2256
2257 mac_state = hclge_get_mac_link_status(hdev);
2258
2259 if (hdev->hw.mac.phydev) {
2260 if (!genphy_read_status(hdev->hw.mac.phydev))
2261 link_stat = mac_state &
2262 hdev->hw.mac.phydev->link;
2263 else
2264 link_stat = 0;
2265
2266 } else {
2267 link_stat = mac_state;
2268 }
2269
2270 return !!link_stat;
2271 }
2272
2273 static void hclge_update_link_status(struct hclge_dev *hdev)
2274 {
2275 struct hnae3_client *client = hdev->nic_client;
2276 struct hnae3_handle *handle;
2277 int state;
2278 int i;
2279
2280 if (!client)
2281 return;
2282 state = hclge_get_mac_phy_link(hdev);
2283 if (state != hdev->hw.mac.link) {
2284 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2285 handle = &hdev->vport[i].nic;
2286 client->ops->link_status_change(handle, state);
2287 }
2288 hdev->hw.mac.link = state;
2289 }
2290 }
2291
2292 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2293 {
2294 struct hclge_mac mac = hdev->hw.mac;
2295 u8 duplex;
2296 int speed;
2297 int ret;
2298
2299 /* Get the speed and duplex from the MAC command as the autoneg
2300 * result when no PHY is present.
2301 */
2302 if (mac.phydev)
2303 return 0;
2304
2305 /* Update mac->autoneg. */
2306 ret = hclge_query_autoneg_result(hdev);
2307 if (ret) {
2308 dev_err(&hdev->pdev->dev,
2309 "autoneg result query failed %d\n", ret);
2310 return ret;
2311 }
2312
2313 if (!mac.autoneg)
2314 return 0;
2315
2316 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2317 if (ret) {
2318 dev_err(&hdev->pdev->dev,
2319 "mac autoneg/speed/duplex query failed %d\n", ret);
2320 return ret;
2321 }
2322
2323 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2324 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2325 if (ret) {
2326 dev_err(&hdev->pdev->dev,
2327 "mac speed/duplex config failed %d\n", ret);
2328 return ret;
2329 }
2330 }
2331
2332 return 0;
2333 }
2334
2335 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2336 {
2337 struct hclge_vport *vport = hclge_get_vport(handle);
2338 struct hclge_dev *hdev = vport->back;
2339
2340 return hclge_update_speed_duplex(hdev);
2341 }
2342
2343 static int hclge_get_status(struct hnae3_handle *handle)
2344 {
2345 struct hclge_vport *vport = hclge_get_vport(handle);
2346 struct hclge_dev *hdev = vport->back;
2347
2348 hclge_update_link_status(hdev);
2349
2350 return hdev->hw.mac.link;
2351 }
2352
2353 static void hclge_service_timer(struct timer_list *t)
2354 {
2355 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2356
2357 mod_timer(&hdev->service_timer, jiffies + HZ);
2358 hclge_task_schedule(hdev);
2359 }
2360
2361 static void hclge_service_complete(struct hclge_dev *hdev)
2362 {
2363 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2364
2365 /* Flush memory before next watchdog */
2366 smp_mb__before_atomic();
2367 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2368 }
2369
2370 static void hclge_service_task(struct work_struct *work)
2371 {
2372 struct hclge_dev *hdev =
2373 container_of(work, struct hclge_dev, service_task);
2374
2375 hclge_update_speed_duplex(hdev);
2376 hclge_update_link_status(hdev);
2377 hclge_update_stats_for_all(hdev);
2378 hclge_service_complete(hdev);
2379 }
2380
2381 static void hclge_disable_sriov(struct hclge_dev *hdev)
2382 {
2383 /* If our VFs are assigned we cannot shut down SR-IOV
2384 * without causing issues, so just leave the hardware
2385 * available but disabled
2386 */
2387 if (pci_vfs_assigned(hdev->pdev)) {
2388 dev_warn(&hdev->pdev->dev,
2389 "disabling driver while VFs are assigned\n");
2390 return;
2391 }
2392
2393 pci_disable_sriov(hdev->pdev);
2394 }
2395
2396 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2397 {
2398 /* VF handle has no client */
2399 if (!handle->client)
2400 return container_of(handle, struct hclge_vport, nic);
2401 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2402 return container_of(handle, struct hclge_vport, roce);
2403 else
2404 return container_of(handle, struct hclge_vport, nic);
2405 }
2406
2407 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2408 struct hnae3_vector_info *vector_info)
2409 {
2410 struct hclge_vport *vport = hclge_get_vport(handle);
2411 struct hnae3_vector_info *vector = vector_info;
2412 struct hclge_dev *hdev = vport->back;
2413 int alloc = 0;
2414 int i, j;
2415
2416 vector_num = min(hdev->num_msi_left, vector_num);
2417
2418 for (j = 0; j < vector_num; j++) {
2419 for (i = 1; i < hdev->num_msi; i++) {
2420 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2421 vector->vector = pci_irq_vector(hdev->pdev, i);
2422 vector->io_addr = hdev->hw.io_base +
2423 HCLGE_VECTOR_REG_BASE +
2424 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2425 vport->vport_id *
2426 HCLGE_VECTOR_VF_OFFSET;
2427 hdev->vector_status[i] = vport->vport_id;
2428
2429 vector++;
2430 alloc++;
2431
2432 break;
2433 }
2434 }
2435 }
2436 hdev->num_msi_left -= alloc;
2437 hdev->num_msi_used += alloc;
2438
2439 return alloc;
2440 }
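
/* Illustrative sketch (not part of this file): the doorbell address math
 * in hclge_get_vector(). Vector 0 is reserved (the search starts at
 * i = 1), so vector i lands at io_base + REG_BASE + (i - 1) * REG_OFFSET
 * plus a per-vport stride. The constant values below are demo
 * assumptions, not the real HCLGE_VECTOR_* values.
 */
#include <stdint.h>

#define DEMO_VECTOR_REG_BASE 0x20000 /* assumed */
#define DEMO_VECTOR_REG_OFFSET 0x4 /* assumed */
#define DEMO_VECTOR_VF_OFFSET 0x100000 /* assumed */

static uintptr_t demo_vector_io_addr(uintptr_t io_base, unsigned int i,
                                     unsigned int vport_id)
{
 return io_base + DEMO_VECTOR_REG_BASE +
        (i - 1) * DEMO_VECTOR_REG_OFFSET +
        vport_id * DEMO_VECTOR_VF_OFFSET;
}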
2441
2442 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2443 {
2444 int i;
2445
2446 for (i = 0; i < hdev->num_msi; i++) {
2447 if (hdev->msix_entries) {
2448 if (vector == hdev->msix_entries[i].vector)
2449 return i;
2450 } else {
2451 if (vector == (hdev->base_msi_vector + i))
2452 return i;
2453 }
2454 }
2455 return -EINVAL;
2456 }
2457
2458 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2459 {
2460 return HCLGE_RSS_KEY_SIZE;
2461 }
2462
2463 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2464 {
2465 return HCLGE_RSS_IND_TBL_SIZE;
2466 }
2467
2468 static int hclge_get_rss_algo(struct hclge_dev *hdev)
2469 {
2470 struct hclge_rss_config_cmd *req;
2471 struct hclge_desc desc;
2472 int rss_hash_algo;
2473 int ret;
2474
2475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2476
2477 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2478 if (ret) {
2479 dev_err(&hdev->pdev->dev,
2480 "Get link status error, status =%d\n", ret);
2481 return ret;
2482 }
2483
2484 req = (struct hclge_rss_config_cmd *)desc.data;
2485 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2486
2487 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2488 return ETH_RSS_HASH_TOP;
2489
2490 return -EINVAL;
2491 }
2492
2493 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2494 const u8 hfunc, const u8 *key)
2495 {
2496 struct hclge_rss_config_cmd *req;
2497 struct hclge_desc desc;
2498 int key_offset;
2499 int key_size;
2500 int ret;
2501
2502 req = (struct hclge_rss_config_cmd *)desc.data;
2503
2504 for (key_offset = 0; key_offset < 3; key_offset++) {
2505 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2506 false);
2507
2508 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2509 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2510
2511 if (key_offset == 2)
2512 key_size =
2513 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2514 else
2515 key_size = HCLGE_RSS_HASH_KEY_NUM;
2516
2517 memcpy(req->hash_key,
2518 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2519
2520 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2521 if (ret) {
2522 dev_err(&hdev->pdev->dev,
2523 "Configure RSS config fail, status = %d\n",
2524 ret);
2525 return ret;
2526 }
2527 }
2528 return 0;
2529 }
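
/* Illustrative sketch (not part of this file): the key chunking above.
 * The RSS key is written across three descriptors -- two full chunks and
 * a short tail -- assuming a 40-byte key and 16 bytes of key space per
 * descriptor, as the HCLGE_RSS_* constants suggest.
 */
#include <stddef.h>

#define DEMO_RSS_KEY_SIZE 40 /* assumed: HCLGE_RSS_KEY_SIZE */
#define DEMO_KEY_BYTES_PER_DESC 16 /* assumed: HCLGE_RSS_HASH_KEY_NUM */

static size_t demo_key_chunk_size(int key_offset)
{
 if (key_offset == 2) /* last descriptor carries the remainder */
  return DEMO_RSS_KEY_SIZE - 2 * DEMO_KEY_BYTES_PER_DESC; /* 8 */
 return DEMO_KEY_BYTES_PER_DESC; /* 16 */
}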
2530
2531 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2532 {
2533 struct hclge_rss_indirection_table_cmd *req;
2534 struct hclge_desc desc;
2535 int i, j;
2536 int ret;
2537
2538 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
2539
2540 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2541 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
2542 false);
2543
2544 req->start_table_index =
2545 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2546 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
2547
2548 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2549 req->rss_result[j] =
2550 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2551
2552 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2553 if (ret) {
2554 dev_err(&hdev->pdev->dev,
2555 "Configure rss indir table fail,status = %d\n",
2556 ret);
2557 return ret;
2558 }
2559 }
2560 return 0;
2561 }
2562
2563 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2564 u16 *tc_size, u16 *tc_offset)
2565 {
2566 struct hclge_rss_tc_mode_cmd *req;
2567 struct hclge_desc desc;
2568 int ret;
2569 int i;
2570
2571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2572 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
2573
2574 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2575 u16 mode = 0;
2576
2577 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2578 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
2579 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2580 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
2581 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2582
2583 req->rss_tc_mode[i] = cpu_to_le16(mode);
2584 }
2585
2586 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2587 if (ret) {
2588 dev_err(&hdev->pdev->dev,
2589 "Configure rss tc mode fail, status = %d\n", ret);
2590 return ret;
2591 }
2592
2593 return 0;
2594 }
2595
2596 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2597 {
2598 struct hclge_rss_input_tuple_cmd *req;
2599 struct hclge_desc desc;
2600 int ret;
2601
2602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2603
2604 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2605 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2606 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2607 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2608 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2609 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2610 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2611 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2612 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2613 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2614 if (ret) {
2615 dev_err(&hdev->pdev->dev,
2616 "Configure rss input fail, status = %d\n", ret);
2617 return ret;
2618 }
2619
2620 return 0;
2621 }
2622
2623 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2624 u8 *key, u8 *hfunc)
2625 {
2626 struct hclge_vport *vport = hclge_get_vport(handle);
2627 struct hclge_dev *hdev = vport->back;
2628 int i;
2629
2630 /* Get hash algorithm */
2631 if (hfunc)
2632 *hfunc = hclge_get_rss_algo(hdev);
2633
2634 /* Get the RSS Key required by the user */
2635 if (key)
2636 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2637
2638 /* Get indirect table */
2639 if (indir)
2640 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2641 indir[i] = vport->rss_indirection_tbl[i];
2642
2643 return 0;
2644 }
2645
2646 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2647 const u8 *key, const u8 hfunc)
2648 {
2649 struct hclge_vport *vport = hclge_get_vport(handle);
2650 struct hclge_dev *hdev = vport->back;
2651 u8 hash_algo;
2652 int ret, i;
2653
2654 /* Set the RSS Hash Key if specified by the user */
2655 if (key) {
2656 /* Update the shadow RSS key with the user-specified key */
2657 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2658
2659 if (hfunc == ETH_RSS_HASH_TOP ||
2660 hfunc == ETH_RSS_HASH_NO_CHANGE)
2661 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2662 else
2663 return -EINVAL;
2664 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2665 if (ret)
2666 return ret;
2667 }
2668
2669 /* Update the shadow RSS table with user specified qids */
2670 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2671 vport->rss_indirection_tbl[i] = indir[i];
2672
2673 /* Update the hardware */
2674 ret = hclge_set_rss_indir_table(hdev, indir);
2675 return ret;
2676 }
2677
2678 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
2679 {
2680 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
2681
2682 if (nfc->data & RXH_L4_B_2_3)
2683 hash_sets |= HCLGE_D_PORT_BIT;
2684 else
2685 hash_sets &= ~HCLGE_D_PORT_BIT;
2686
2687 if (nfc->data & RXH_IP_SRC)
2688 hash_sets |= HCLGE_S_IP_BIT;
2689 else
2690 hash_sets &= ~HCLGE_S_IP_BIT;
2691
2692 if (nfc->data & RXH_IP_DST)
2693 hash_sets |= HCLGE_D_IP_BIT;
2694 else
2695 hash_sets &= ~HCLGE_D_IP_BIT;
2696
2697 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
2698 hash_sets |= HCLGE_V_TAG_BIT;
2699
2700 return hash_sets;
2701 }
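
/* Illustrative sketch (not part of this file): the flag translation done
 * by hclge_get_rss_hash_bits(). The ethtool RXH_* request bits map onto
 * the hardware tuple bits, and SCTP flows additionally hash on the
 * verification tag. The HCLGE_*_BIT stand-ins below are assumed values.
 */
#include <stdint.h>

#define DEMO_S_PORT_BIT 0x01 /* assumed */
#define DEMO_D_PORT_BIT 0x02 /* assumed */
#define DEMO_S_IP_BIT 0x04 /* assumed */
#define DEMO_D_IP_BIT 0x08 /* assumed */
#define DEMO_V_TAG_BIT 0x10 /* assumed */

static uint8_t demo_hash_bits(uint64_t rxh_data, int is_sctp)
{
 uint8_t sets = 0;

 if (rxh_data & (1u << 6)) /* RXH_L4_B_0_1: source port */
  sets |= DEMO_S_PORT_BIT;
 if (rxh_data & (1u << 7)) /* RXH_L4_B_2_3: destination port */
  sets |= DEMO_D_PORT_BIT;
 if (rxh_data & (1u << 4)) /* RXH_IP_SRC */
  sets |= DEMO_S_IP_BIT;
 if (rxh_data & (1u << 5)) /* RXH_IP_DST */
  sets |= DEMO_D_IP_BIT;
 if (is_sctp)
  sets |= DEMO_V_TAG_BIT;
 return sets;
}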
2702
2703 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
2704 struct ethtool_rxnfc *nfc)
2705 {
2706 struct hclge_vport *vport = hclge_get_vport(handle);
2707 struct hclge_dev *hdev = vport->back;
2708 struct hclge_rss_input_tuple_cmd *req;
2709 struct hclge_desc desc;
2710 u8 tuple_sets;
2711 int ret;
2712
2713 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2714 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2715 return -EINVAL;
2716
2717 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
2719 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2720 if (ret) {
2721 dev_err(&hdev->pdev->dev,
2722 "Read rss tuple fail, status = %d\n", ret);
2723 return ret;
2724 }
2725
2726 hclge_cmd_reuse_desc(&desc, false);
2727
2728 tuple_sets = hclge_get_rss_hash_bits(nfc);
2729 switch (nfc->flow_type) {
2730 case TCP_V4_FLOW:
2731 req->ipv4_tcp_en = tuple_sets;
2732 break;
2733 case TCP_V6_FLOW:
2734 req->ipv6_tcp_en = tuple_sets;
2735 break;
2736 case UDP_V4_FLOW:
2737 req->ipv4_udp_en = tuple_sets;
2738 break;
2739 case UDP_V6_FLOW:
2740 req->ipv6_udp_en = tuple_sets;
2741 break;
2742 case SCTP_V4_FLOW:
2743 req->ipv4_sctp_en = tuple_sets;
2744 break;
2745 case SCTP_V6_FLOW:
2746 if ((nfc->data & RXH_L4_B_0_1) ||
2747 (nfc->data & RXH_L4_B_2_3))
2748 return -EINVAL;
2749
2750 req->ipv6_sctp_en = tuple_sets;
2751 break;
2752 case IPV4_FLOW:
2753 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2754 break;
2755 case IPV6_FLOW:
2756 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2757 break;
2758 default:
2759 return -EINVAL;
2760 }
2761
2762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2763 if (ret)
2764 dev_err(&hdev->pdev->dev,
2765 "Set rss tuple fail, status = %d\n", ret);
2766
2767 return ret;
2768 }
2769
2770 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
2771 struct ethtool_rxnfc *nfc)
2772 {
2773 struct hclge_vport *vport = hclge_get_vport(handle);
2774 struct hclge_dev *hdev = vport->back;
2775 struct hclge_rss_input_tuple_cmd *req;
2776 struct hclge_desc desc;
2777 u8 tuple_sets;
2778 int ret;
2779
2780 nfc->data = 0;
2781
2782 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
2784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2785 if (ret) {
2786 dev_err(&hdev->pdev->dev,
2787 "Read rss tuple fail, status = %d\n", ret);
2788 return ret;
2789 }
2790
2791 switch (nfc->flow_type) {
2792 case TCP_V4_FLOW:
2793 tuple_sets = req->ipv4_tcp_en;
2794 break;
2795 case UDP_V4_FLOW:
2796 tuple_sets = req->ipv4_udp_en;
2797 break;
2798 case TCP_V6_FLOW:
2799 tuple_sets = req->ipv6_tcp_en;
2800 break;
2801 case UDP_V6_FLOW:
2802 tuple_sets = req->ipv6_udp_en;
2803 break;
2804 case SCTP_V4_FLOW:
2805 tuple_sets = req->ipv4_sctp_en;
2806 break;
2807 case SCTP_V6_FLOW:
2808 tuple_sets = req->ipv6_sctp_en;
2809 break;
2810 case IPV4_FLOW:
2811 case IPV6_FLOW:
2812 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
2813 break;
2814 default:
2815 return -EINVAL;
2816 }
2817
2818 if (!tuple_sets)
2819 return 0;
2820
2821 if (tuple_sets & HCLGE_D_PORT_BIT)
2822 nfc->data |= RXH_L4_B_2_3;
2823 if (tuple_sets & HCLGE_S_PORT_BIT)
2824 nfc->data |= RXH_L4_B_0_1;
2825 if (tuple_sets & HCLGE_D_IP_BIT)
2826 nfc->data |= RXH_IP_DST;
2827 if (tuple_sets & HCLGE_S_IP_BIT)
2828 nfc->data |= RXH_IP_SRC;
2829
2830 return 0;
2831 }
2832
2833 static int hclge_get_tc_size(struct hnae3_handle *handle)
2834 {
2835 struct hclge_vport *vport = hclge_get_vport(handle);
2836 struct hclge_dev *hdev = vport->back;
2837
2838 return hdev->rss_size_max;
2839 }
2840
2841 int hclge_rss_init_hw(struct hclge_dev *hdev)
2842 {
2843 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2844 struct hclge_vport *vport = hdev->vport;
2845 u16 tc_offset[HCLGE_MAX_TC_NUM];
2846 u8 rss_key[HCLGE_RSS_KEY_SIZE];
2847 u16 tc_valid[HCLGE_MAX_TC_NUM];
2848 u16 tc_size[HCLGE_MAX_TC_NUM];
2849 u32 *rss_indir = NULL;
2850 u16 rss_size = 0, roundup_size;
2851 const u8 *key;
2852 int i, ret, j;
2853
2854 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
2855 if (!rss_indir)
2856 return -ENOMEM;
2857
2858 /* Get default RSS key */
2859 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
2860
2861 /* Initialize RSS indirect table for each vport */
2862 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
2863 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
2864 vport[j].rss_indirection_tbl[i] =
2865 i % vport[j].alloc_rss_size;
2866
2867 /* vport 0 is for PF */
2868 if (j != 0)
2869 continue;
2870
2871 rss_size = vport[j].alloc_rss_size;
2872 rss_indir[i] = vport[j].rss_indirection_tbl[i];
2873 }
2874 }
2875 ret = hclge_set_rss_indir_table(hdev, rss_indir);
2876 if (ret)
2877 goto err;
2878
2879 key = rss_key;
2880 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
2881 if (ret)
2882 goto err;
2883
2884 ret = hclge_set_rss_input_tuple(hdev);
2885 if (ret)
2886 goto err;
2887
2888 /* Each TC has the same queue size. The tc_size written to hardware
2889 * is the log2 of rss_size rounded up to a power of two; the actual
2890 * queue size is limited by the indirection table.
2891 */
2892 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
2893 dev_err(&hdev->pdev->dev,
2894 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
2895 rss_size);
2896 ret = -EINVAL;
2897 goto err;
2898 }
2899
2900 roundup_size = roundup_pow_of_two(rss_size);
2901 roundup_size = ilog2(roundup_size);
2902
2903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2904 tc_valid[i] = 0;
2905
2906 if (!(hdev->hw_tc_map & BIT(i)))
2907 continue;
2908
2909 tc_valid[i] = 1;
2910 tc_size[i] = roundup_size;
2911 tc_offset[i] = rss_size * i;
2912 }
2913
2914 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
2915
2916 err:
2917 kfree(rss_indir);
2918
2919 return ret;
2920 }
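
/* Illustrative sketch (not part of this file): the tc_size computation in
 * hclge_rss_init_hw(). roundup_pow_of_two() followed by ilog2() is simply
 * ceil(log2(rss_size)); e.g. rss_size = 24 rounds up to 32, giving
 * tc_size = 5.
 */
#include <stdint.h>

static unsigned int demo_tc_size(uint16_t rss_size)
{
 unsigned int bits = 0;

 while ((1u << bits) < rss_size)
  bits++;
 return bits; /* demo_tc_size(24) == 5, demo_tc_size(16) == 4 */
}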
2921
2922 int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
2923 struct hnae3_ring_chain_node *ring_chain)
2924 {
2925 struct hclge_dev *hdev = vport->back;
2926 struct hclge_ctrl_vector_chain_cmd *req;
2927 struct hnae3_ring_chain_node *node;
2928 struct hclge_desc desc;
2929 int ret;
2930 int i;
2931
2932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
2933
2934 req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
2935 req->int_vector_id = vector_id;
2936
2937 i = 0;
2938 for (node = ring_chain; node; node = node->next) {
2939 u16 type_and_id = 0;
2940
2941 hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
2942 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2943 hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
2944 node->tqp_index);
2945 hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
2946 HCLGE_INT_GL_IDX_S,
2947 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2948 req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
2949 req->vfid = vport->vport_id;
2950
2951 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2952 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2953
2954 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2955 if (ret) {
2956 dev_err(&hdev->pdev->dev,
2957 "Map TQP fail, status is %d.\n",
2958 ret);
2959 return ret;
2960 }
2961 i = 0;
2962
2963 hclge_cmd_setup_basic_desc(&desc,
2964 HCLGE_OPC_ADD_RING_TO_VECTOR,
2965 false);
2966 req->int_vector_id = vector_id;
2967 }
2968 }
2969
2970 if (i > 0) {
2971 req->int_cause_num = i;
2972
2973 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2974 if (ret) {
2975 dev_err(&hdev->pdev->dev,
2976 "Map TQP fail, status is %d.\n", ret);
2977 return ret;
2978 }
2979 }
2980
2981 return 0;
2982 }
2983
2984 static int hclge_map_handle_ring_to_vector(
2985 struct hnae3_handle *handle, int vector,
2986 struct hnae3_ring_chain_node *ring_chain)
2987 {
2988 struct hclge_vport *vport = hclge_get_vport(handle);
2989 struct hclge_dev *hdev = vport->back;
2990 int vector_id;
2991
2992 vector_id = hclge_get_vector_index(hdev, vector);
2993 if (vector_id < 0) {
2994 dev_err(&hdev->pdev->dev,
2995 "Get vector index fail. ret =%d\n", vector_id);
2996 return vector_id;
2997 }
2998
2999 return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
3000 }
3001
3002 static int hclge_unmap_ring_from_vector(
3003 struct hnae3_handle *handle, int vector,
3004 struct hnae3_ring_chain_node *ring_chain)
3005 {
3006 struct hclge_vport *vport = hclge_get_vport(handle);
3007 struct hclge_dev *hdev = vport->back;
3008 struct hclge_ctrl_vector_chain_cmd *req;
3009 struct hnae3_ring_chain_node *node;
3010 struct hclge_desc desc;
3011 int i, vector_id;
3012 int ret;
3013
3014 vector_id = hclge_get_vector_index(hdev, vector);
3015 if (vector_id < 0) {
3016 dev_err(&handle->pdev->dev,
3017 "Get vector index fail. ret =%d\n", vector_id);
3018 return vector_id;
3019 }
3020
3021 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
3022
3023 req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3024 req->int_vector_id = vector_id;
3025
3026 i = 0;
3027 for (node = ring_chain; node; node = node->next) {
3028 u16 type_and_id = 0;
3029
3030 hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
3031 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3032 hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
3033 node->tqp_index);
3034 hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
3035 HCLGE_INT_GL_IDX_S,
3036 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3037
3038 req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
3039 req->vfid = vport->vport_id;
3040
3041 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3042 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3043
3044 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3045 if (ret) {
3046 dev_err(&hdev->pdev->dev,
3047 "Unmap TQP fail, status is %d.\n",
3048 ret);
3049 return ret;
3050 }
3051 i = 0;
3052 hclge_cmd_setup_basic_desc(&desc,
3053 HCLGE_OPC_DEL_RING_TO_VECTOR,
3054 false);
3055 req->int_vector_id = vector_id;
3056 }
3057 }
3058
3059 if (i > 0) {
3060 req->int_cause_num = i;
3061
3062 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3063 if (ret) {
3064 dev_err(&hdev->pdev->dev,
3065 "Unmap TQP fail, status is %d.\n", ret);
3066 return ret;
3067 }
3068 }
3069
3070 return 0;
3071 }
3072
3073 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3074 struct hclge_promisc_param *param)
3075 {
3076 struct hclge_promisc_cfg_cmd *req;
3077 struct hclge_desc desc;
3078 int ret;
3079
3080 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3081
3082 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3083 req->vf_id = param->vf_id;
3084 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
3085
3086 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3087 if (ret) {
3088 dev_err(&hdev->pdev->dev,
3089 "Set promisc mode fail, status is %d.\n", ret);
3090 return ret;
3091 }
3092 return 0;
3093 }
3094
3095 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3096 bool en_mc, bool en_bc, int vport_id)
3097 {
3098 if (!param)
3099 return;
3100
3101 memset(param, 0, sizeof(struct hclge_promisc_param));
3102 if (en_uc)
3103 param->enable = HCLGE_PROMISC_EN_UC;
3104 if (en_mc)
3105 param->enable |= HCLGE_PROMISC_EN_MC;
3106 if (en_bc)
3107 param->enable |= HCLGE_PROMISC_EN_BC;
3108 param->vf_id = vport_id;
3109 }
3110
3111 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
3112 {
3113 struct hclge_vport *vport = hclge_get_vport(handle);
3114 struct hclge_dev *hdev = vport->back;
3115 struct hclge_promisc_param param;
3116
3117 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
3118 hclge_cmd_set_promisc_mode(hdev, &param);
3119 }
3120
3121 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3122 {
3123 struct hclge_desc desc;
3124 struct hclge_config_mac_mode_cmd *req =
3125 (struct hclge_config_mac_mode_cmd *)desc.data;
3126 u32 loop_en = 0;
3127 int ret;
3128
3129 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3130 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3131 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3132 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3133 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3134 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3135 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3136 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3137 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3138 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3139 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3140 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3141 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3142 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3143 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3144 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3145
3146 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3147 if (ret)
3148 dev_err(&hdev->pdev->dev,
3149 "mac enable fail, ret =%d.\n", ret);
3150 }
3151
3152 static int hclge_set_loopback(struct hnae3_handle *handle,
3153 enum hnae3_loop loop_mode, bool en)
3154 {
3155 struct hclge_vport *vport = hclge_get_vport(handle);
3156 struct hclge_config_mac_mode_cmd *req;
3157 struct hclge_dev *hdev = vport->back;
3158 struct hclge_desc desc;
3159 u32 loop_en;
3160 int ret;
3161
3162 switch (loop_mode) {
3163 case HNAE3_MAC_INTER_LOOP_MAC:
3164 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3165 /* 1 Read out the current MAC mode config first */
3166 hclge_cmd_setup_basic_desc(&desc,
3167 HCLGE_OPC_CONFIG_MAC_MODE,
3168 true);
3169 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3170 if (ret) {
3171 dev_err(&hdev->pdev->dev,
3172 "mac loopback get fail, ret =%d.\n",
3173 ret);
3174 return ret;
3175 }
3176
3177 /* 2 Then set up the loopback flag */
3178 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3179 if (en)
3180 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
3181 else
3182 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3183
3184 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3185
3186 /* 3 Write the MAC work mode back with the loopback flag set
3187 * and its original configuration parameters preserved
3188 */
3189 hclge_cmd_reuse_desc(&desc, false);
3190 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3191 if (ret)
3192 dev_err(&hdev->pdev->dev,
3193 "mac loopback set fail, ret =%d.\n", ret);
3194 break;
3195 default:
3196 ret = -EOPNOTSUPP;
3197 dev_err(&hdev->pdev->dev,
3198 "loop_mode %d is not supported\n", loop_mode);
3199 break;
3200 }
3201
3202 return ret;
3203 }
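
/* Illustrative sketch (not part of this file): the read-modify-write
 * pattern the MAC loopback path follows -- read the current mode word,
 * flip only the application-loopback bit, and write the whole word back
 * so the other TX/RX/FCS enables are preserved.
 */
#include <stdint.h>

static uint32_t demo_update_loopback(uint32_t mode_word, unsigned int bit,
                                     int en)
{
 if (en)
  mode_word |= 1u << bit;
 else
  mode_word &= ~(1u << bit);
 return mode_word;
}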
3204
3205 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3206 int stream_id, bool enable)
3207 {
3208 struct hclge_desc desc;
3209 struct hclge_cfg_com_tqp_queue_cmd *req =
3210 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
3211 int ret;
3212
3213 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3214 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3215 req->stream_id = cpu_to_le16(stream_id);
3216 req->enable |= enable << HCLGE_TQP_ENABLE_B;
3217
3218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3219 if (ret)
3220 dev_err(&hdev->pdev->dev,
3221 "Tqp enable fail, status =%d.\n", ret);
3222 return ret;
3223 }
3224
3225 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3226 {
3227 struct hclge_vport *vport = hclge_get_vport(handle);
3228 struct hnae3_queue *queue;
3229 struct hclge_tqp *tqp;
3230 int i;
3231
3232 for (i = 0; i < vport->alloc_tqps; i++) {
3233 queue = handle->kinfo.tqp[i];
3234 tqp = container_of(queue, struct hclge_tqp, q);
3235 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3236 }
3237 }
3238
3239 static int hclge_ae_start(struct hnae3_handle *handle)
3240 {
3241 struct hclge_vport *vport = hclge_get_vport(handle);
3242 struct hclge_dev *hdev = vport->back;
3243 int i, queue_id, ret;
3244
3245 for (i = 0; i < vport->alloc_tqps; i++) {
3246 /* todo clear interrupt */
3247 /* ring enable */
3248 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3249 if (queue_id < 0) {
3250 dev_warn(&hdev->pdev->dev,
3251 "Get invalid queue id, ignore it\n");
3252 continue;
3253 }
3254
3255 hclge_tqp_enable(hdev, queue_id, 0, true);
3256 }
3257 /* mac enable */
3258 hclge_cfg_mac_mode(hdev, true);
3259 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3260 mod_timer(&hdev->service_timer, jiffies + HZ);
3261
3262 ret = hclge_mac_start_phy(hdev);
3263 if (ret)
3264 return ret;
3265
3266 /* reset tqp stats */
3267 hclge_reset_tqp_stats(handle);
3268
3269 return 0;
3270 }
3271
3272 static void hclge_ae_stop(struct hnae3_handle *handle)
3273 {
3274 struct hclge_vport *vport = hclge_get_vport(handle);
3275 struct hclge_dev *hdev = vport->back;
3276 int i, queue_id;
3277
3278 for (i = 0; i < vport->alloc_tqps; i++) {
3279 /* Ring disable */
3280 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3281 if (queue_id < 0) {
3282 dev_warn(&hdev->pdev->dev,
3283 "Get invalid queue id, ignore it\n");
3284 continue;
3285 }
3286
3287 hclge_tqp_enable(hdev, queue_id, 0, false);
3288 }
3289 /* Mac disable */
3290 hclge_cfg_mac_mode(hdev, false);
3291
3292 hclge_mac_stop_phy(hdev);
3293
3294 /* reset tqp stats */
3295 hclge_reset_tqp_stats(handle);
3296 }
3297
3298 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3299 u16 cmdq_resp, u8 resp_code,
3300 enum hclge_mac_vlan_tbl_opcode op)
3301 {
3302 struct hclge_dev *hdev = vport->back;
3303 int return_status = -EIO;
3304
3305 if (cmdq_resp) {
3306 dev_err(&hdev->pdev->dev,
3307 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3308 cmdq_resp);
3309 return -EIO;
3310 }
3311
3312 if (op == HCLGE_MAC_VLAN_ADD) {
3313 if ((!resp_code) || (resp_code == 1)) {
3314 return_status = 0;
3315 } else if (resp_code == 2) {
3316 return_status = -EIO;
3317 dev_err(&hdev->pdev->dev,
3318 "add mac addr failed for uc_overflow.\n");
3319 } else if (resp_code == 3) {
3320 return_status = -EIO;
3321 dev_err(&hdev->pdev->dev,
3322 "add mac addr failed for mc_overflow.\n");
3323 } else {
3324 dev_err(&hdev->pdev->dev,
3325 "add mac addr failed for undefined, code=%d.\n",
3326 resp_code);
3327 }
3328 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3329 if (!resp_code) {
3330 return_status = 0;
3331 } else if (resp_code == 1) {
3332 return_status = -EIO;
3333 dev_dbg(&hdev->pdev->dev,
3334 "remove mac addr failed for miss.\n");
3335 } else {
3336 dev_err(&hdev->pdev->dev,
3337 "remove mac addr failed for undefined, code=%d.\n",
3338 resp_code);
3339 }
3340 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3341 if (!resp_code) {
3342 return_status = 0;
3343 } else if (resp_code == 1) {
3344 return_status = -EIO;
3345 dev_dbg(&hdev->pdev->dev,
3346 "lookup mac addr failed for miss.\n");
3347 } else {
3348 dev_err(&hdev->pdev->dev,
3349 "lookup mac addr failed for undefined, code=%d.\n",
3350 resp_code);
3351 }
3352 } else {
3353 return_status = -EIO;
3354 dev_err(&hdev->pdev->dev,
3355 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3356 op);
3357 }
3358
3359 return return_status;
3360 }
3361
3362 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3363 {
3364 int word_num;
3365 int bit_num;
3366
3367 if (vfid > 255 || vfid < 0)
3368 return -EIO;
3369
3370 if (vfid >= 0 && vfid <= 191) {
3371 word_num = vfid / 32;
3372 bit_num = vfid % 32;
3373 if (clr)
3374 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3375 else
3376 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
3377 } else {
3378 word_num = (vfid - 192) / 32;
3379 bit_num = vfid % 32;
3380 if (clr)
3381 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3382 else
3383 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
3384 }
3385
3386 return 0;
3387 }
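
/* Illustrative sketch (not part of this file): where a vfid lands in the
 * command above. IDs 0-191 occupy the six 32-bit data words of desc[1]
 * (6 * 32 = 192 bits) and IDs 192-255 spill into desc[2].
 */
static int demo_vfid_slot(int vfid, int *desc_idx, int *word, int *bit)
{
 if (vfid < 0 || vfid > 255)
  return -1;
 if (vfid <= 191) {
  *desc_idx = 1;
  *word = vfid / 32;
 } else {
  *desc_idx = 2;
  *word = (vfid - 192) / 32;
 }
 *bit = vfid % 32;
 return 0; /* e.g. vfid 200 -> desc 2, word 0, bit 8 */
}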
3388
3389 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3390 {
3391 #define HCLGE_DESC_NUMBER 3
3392 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3393 int i, j;
3394
3395 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3396 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3397 if (desc[i].data[j])
3398 return false;
3399
3400 return true;
3401 }
3402
3403 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3404 const u8 *addr)
3405 {
3406 const unsigned char *mac_addr = addr;
3407 u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
3408 (mac_addr[2] << 16) | (mac_addr[3] << 24);
3409 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3410
3411 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3412 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3413 }
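
/* Illustrative sketch (not part of this file): the byte packing performed
 * by hclge_prepare_mac_addr(). Bytes 0-3 of the MAC form a little-endian
 * 32-bit word and bytes 4-5 a 16-bit word, so aa:bb:cc:dd:ee:ff packs to
 * hi32 = 0xddccbbaa and lo16 = 0xffee.
 */
#include <stdint.h>

static void demo_pack_mac(const uint8_t mac[6], uint32_t *hi32,
                          uint16_t *lo16)
{
 *hi32 = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
         ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
 *lo16 = (uint16_t)(mac[4] | (mac[5] << 8));
}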
3414
3415 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3416 const u8 *addr)
3417 {
3418 u16 high_val = addr[1] | (addr[0] << 8);
3419 struct hclge_dev *hdev = vport->back;
3420 u32 rsh = 4 - hdev->mta_mac_sel_type;
3421 u16 ret_val = (high_val >> rsh) & 0xfff;
3422
3423 return ret_val;
3424 }
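
/* Illustrative sketch (not part of this file): the 12-bit MTA index
 * derivation above. The first two MAC bytes form a 16-bit value (bits
 * 47..32 of the address) and the mta_mac_sel type slides a 12-bit window
 * across it: sel 0 keeps bits 47..36, and each increment moves the window
 * down one bit. The shift range is inferred from rsh = 4 - sel.
 */
#include <stdint.h>

static uint16_t demo_mta_index(uint8_t b0, uint8_t b1, unsigned int sel)
{
 uint16_t high = (uint16_t)(b1 | (b0 << 8));

 return (uint16_t)((high >> (4 - sel)) & 0xfff);
}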
3425
3426 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3427 enum hclge_mta_dmac_sel_type mta_mac_sel,
3428 bool enable)
3429 {
3430 struct hclge_mta_filter_mode_cmd *req;
3431 struct hclge_desc desc;
3432 int ret;
3433
3434 req = (struct hclge_mta_filter_mode_cmd *)desc.data;
3435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3436
3437 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3438 enable);
3439 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3440 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3441
3442 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3443 if (ret) {
3444 dev_err(&hdev->pdev->dev,
3445 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3446 ret);
3447 return ret;
3448 }
3449
3450 return 0;
3451 }
3452
3453 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3454 u8 func_id,
3455 bool enable)
3456 {
3457 struct hclge_cfg_func_mta_filter_cmd *req;
3458 struct hclge_desc desc;
3459 int ret;
3460
3461 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
3462 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3463
3464 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3465 enable);
3466 req->function_id = func_id;
3467
3468 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3469 if (ret) {
3470 dev_err(&hdev->pdev->dev,
3471 "Config func_id enable failed for cmd_send, ret =%d.\n",
3472 ret);
3473 return ret;
3474 }
3475
3476 return 0;
3477 }
3478
3479 static int hclge_set_mta_table_item(struct hclge_vport *vport,
3480 u16 idx,
3481 bool enable)
3482 {
3483 struct hclge_dev *hdev = vport->back;
3484 struct hclge_cfg_func_mta_item_cmd *req;
3485 struct hclge_desc desc;
3486 u16 item_idx = 0;
3487 int ret;
3488
3489 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
3490 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3491 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3492
3493 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3494 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3495 req->item_idx = cpu_to_le16(item_idx);
3496
3497 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3498 if (ret) {
3499 dev_err(&hdev->pdev->dev,
3500 "Config mta table item failed for cmd_send, ret =%d.\n",
3501 ret);
3502 return ret;
3503 }
3504
3505 return 0;
3506 }
3507
3508 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3509 struct hclge_mac_vlan_tbl_entry_cmd *req)
3510 {
3511 struct hclge_dev *hdev = vport->back;
3512 struct hclge_desc desc;
3513 u8 resp_code;
3514 u16 retval;
3515 int ret;
3516
3517 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3518
3519 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3520
3521 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3522 if (ret) {
3523 dev_err(&hdev->pdev->dev,
3524 "del mac addr failed for cmd_send, ret =%d.\n",
3525 ret);
3526 return ret;
3527 }
3528 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3529 retval = le16_to_cpu(desc.retval);
3530
3531 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
3532 HCLGE_MAC_VLAN_REMOVE);
3533 }
3534
3535 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3536 struct hclge_mac_vlan_tbl_entry_cmd *req,
3537 struct hclge_desc *desc,
3538 bool is_mc)
3539 {
3540 struct hclge_dev *hdev = vport->back;
3541 u8 resp_code;
3542 u16 retval;
3543 int ret;
3544
3545 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3546 if (is_mc) {
3547 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3548 memcpy(desc[0].data,
3549 req,
3550 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3551 hclge_cmd_setup_basic_desc(&desc[1],
3552 HCLGE_OPC_MAC_VLAN_ADD,
3553 true);
3554 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3555 hclge_cmd_setup_basic_desc(&desc[2],
3556 HCLGE_OPC_MAC_VLAN_ADD,
3557 true);
3558 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3559 } else {
3560 memcpy(desc[0].data,
3561 req,
3562 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3563 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3564 }
3565 if (ret) {
3566 dev_err(&hdev->pdev->dev,
3567 "lookup mac addr failed for cmd_send, ret =%d.\n",
3568 ret);
3569 return ret;
3570 }
3571 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
3572 retval = le16_to_cpu(desc[0].retval);
3573
3574 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
3575 HCLGE_MAC_VLAN_LKUP);
3576 }
3577
3578 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3579 struct hclge_mac_vlan_tbl_entry_cmd *req,
3580 struct hclge_desc *mc_desc)
3581 {
3582 struct hclge_dev *hdev = vport->back;
3583 int cfg_status;
3584 u8 resp_code;
3585 u16 retval;
3586 int ret;
3587
3588 if (!mc_desc) {
3589 struct hclge_desc desc;
3590
3591 hclge_cmd_setup_basic_desc(&desc,
3592 HCLGE_OPC_MAC_VLAN_ADD,
3593 false);
3594 memcpy(desc.data, req,
3595 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3597 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3598 retval = le16_to_cpu(desc.retval);
3599
3600 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
3601 resp_code,
3602 HCLGE_MAC_VLAN_ADD);
3603 } else {
3604 mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3605 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3606 mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3607 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3608 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3609 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3610 memcpy(mc_desc[0].data, req,
3611 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3612 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3613 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
3614 retval = le16_to_cpu(mc_desc[0].retval);
3615
3616 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
3617 resp_code,
3618 HCLGE_MAC_VLAN_ADD);
3619 }
3620
3621 if (ret) {
3622 dev_err(&hdev->pdev->dev,
3623 "add mac addr failed for cmd_send, ret =%d.\n",
3624 ret);
3625 return ret;
3626 }
3627
3628 return cfg_status;
3629 }
3630
3631 static int hclge_add_uc_addr(struct hnae3_handle *handle,
3632 const unsigned char *addr)
3633 {
3634 struct hclge_vport *vport = hclge_get_vport(handle);
3635
3636 return hclge_add_uc_addr_common(vport, addr);
3637 }
3638
3639 int hclge_add_uc_addr_common(struct hclge_vport *vport,
3640 const unsigned char *addr)
3641 {
3642 struct hclge_dev *hdev = vport->back;
3643 struct hclge_mac_vlan_tbl_entry_cmd req;
3644 enum hclge_cmd_status status;
3645 u16 egress_port = 0;
3646
3647 /* mac addr check */
3648 if (is_zero_ether_addr(addr) ||
3649 is_broadcast_ether_addr(addr) ||
3650 is_multicast_ether_addr(addr)) {
3651 dev_err(&hdev->pdev->dev,
3652 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3653 addr,
3654 is_zero_ether_addr(addr),
3655 is_broadcast_ether_addr(addr),
3656 is_multicast_ether_addr(addr));
3657 return -EINVAL;
3658 }
3659
3660 memset(&req, 0, sizeof(req));
3661 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3662 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3663 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3664 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3665
3666 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
3667 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
3668 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
3669 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
3670 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
3671 HCLGE_MAC_EPORT_PFID_S, 0);
3672
3673 req.egress_port = cpu_to_le16(egress_port);
3674
3675 hclge_prepare_mac_addr(&req, addr);
3676
3677 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
3678
3679 return status;
3680 }
3681
3682 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3683 const unsigned char *addr)
3684 {
3685 struct hclge_vport *vport = hclge_get_vport(handle);
3686
3687 return hclge_rm_uc_addr_common(vport, addr);
3688 }
3689
3690 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3691 const unsigned char *addr)
3692 {
3693 struct hclge_dev *hdev = vport->back;
3694 struct hclge_mac_vlan_tbl_entry_cmd req;
3695 enum hclge_cmd_status status;
3696
3697 /* mac addr check */
3698 if (is_zero_ether_addr(addr) ||
3699 is_broadcast_ether_addr(addr) ||
3700 is_multicast_ether_addr(addr)) {
3701 dev_dbg(&hdev->pdev->dev,
3702 "Remove mac err! invalid mac:%pM.\n",
3703 addr);
3704 return -EINVAL;
3705 }
3706
3707 memset(&req, 0, sizeof(req));
3708 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3709 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3710 hclge_prepare_mac_addr(&req, addr);
3711 status = hclge_remove_mac_vlan_tbl(vport, &req);
3712
3713 return status;
3714 }
3715
3716 static int hclge_add_mc_addr(struct hnae3_handle *handle,
3717 const unsigned char *addr)
3718 {
3719 struct hclge_vport *vport = hclge_get_vport(handle);
3720
3721 return hclge_add_mc_addr_common(vport, addr);
3722 }
3723
3724 int hclge_add_mc_addr_common(struct hclge_vport *vport,
3725 const unsigned char *addr)
3726 {
3727 struct hclge_dev *hdev = vport->back;
3728 struct hclge_mac_vlan_tbl_entry_cmd req;
3729 struct hclge_desc desc[3];
3730 u16 tbl_idx;
3731 int status;
3732
3733 /* mac addr check */
3734 if (!is_multicast_ether_addr(addr)) {
3735 dev_err(&hdev->pdev->dev,
3736 "Add mc mac err! invalid mac:%pM.\n",
3737 addr);
3738 return -EINVAL;
3739 }
3740 memset(&req, 0, sizeof(req));
3741 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3742 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3743 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3744 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3745 hclge_prepare_mac_addr(&req, addr);
3746 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3747 if (!status) {
3748 /* This mac addr exists, update the VFID for it */
3749 hclge_update_desc_vfid(desc, vport->vport_id, false);
3750 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3751 } else {
3752 /* This mac addr does not exist, add a new entry for it */
3753 memset(desc[0].data, 0, sizeof(desc[0].data));
3754 memset(desc[1].data, 0, sizeof(desc[1].data));
3755 memset(desc[2].data, 0, sizeof(desc[2].data));
3756 hclge_update_desc_vfid(desc, vport->vport_id, false);
3757 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3758 }
3759
3760 /* Set MTA table for this MAC address */
3761 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3762 status = hclge_set_mta_table_item(vport, tbl_idx, true);
3763
3764 return status;
3765 }
3766
3767 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
3768 const unsigned char *addr)
3769 {
3770 struct hclge_vport *vport = hclge_get_vport(handle);
3771
3772 return hclge_rm_mc_addr_common(vport, addr);
3773 }
3774
3775 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
3776 const unsigned char *addr)
3777 {
3778 struct hclge_dev *hdev = vport->back;
3779 struct hclge_mac_vlan_tbl_entry_cmd req;
3780 enum hclge_cmd_status status;
3781 struct hclge_desc desc[3];
3782 u16 tbl_idx;
3783
3784 /* mac addr check */
3785 if (!is_multicast_ether_addr(addr)) {
3786 dev_dbg(&hdev->pdev->dev,
3787 "Remove mc mac err! invalid mac:%pM.\n",
3788 addr);
3789 return -EINVAL;
3790 }
3791
3792 memset(&req, 0, sizeof(req));
3793 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3794 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3795 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3796 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3797 hclge_prepare_mac_addr(&req, addr);
3798 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3799 if (!status) {
3800 /* This mac addr exists, remove this handle's VFID for it */
3801 hclge_update_desc_vfid(desc, vport->vport_id, true);
3802
3803 if (hclge_is_all_function_id_zero(desc))
3804 /* All vfids are zero, so delete this entry */
3805 status = hclge_remove_mac_vlan_tbl(vport, &req);
3806 else
3807 /* Not all vfids are zero, just update the entry */
3808 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3809
3810 } else {
3811 /* This mac addr does not exist, so it can't be deleted */
3812 dev_err(&hdev->pdev->dev,
3813 "Rm multicast mac addr failed, ret = %d.\n",
3814 status);
3815 return -EIO;
3816 }
3817
3818 /* Set MTA table for this MAC address */
3819 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3820 status = hclge_set_mta_table_item(vport, tbl_idx, false);
3821
3822 return status;
3823 }
3824
3825 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
3826 {
3827 struct hclge_vport *vport = hclge_get_vport(handle);
3828 struct hclge_dev *hdev = vport->back;
3829
3830 ether_addr_copy(p, hdev->hw.mac.mac_addr);
3831 }
3832
3833 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
3834 {
3835 const unsigned char *new_addr = (const unsigned char *)p;
3836 struct hclge_vport *vport = hclge_get_vport(handle);
3837 struct hclge_dev *hdev = vport->back;
3838
3839 /* mac addr check */
3840 if (is_zero_ether_addr(new_addr) ||
3841 is_broadcast_ether_addr(new_addr) ||
3842 is_multicast_ether_addr(new_addr)) {
3843 dev_err(&hdev->pdev->dev,
3844 "Change uc mac err! invalid mac:%p.\n",
3845 new_addr);
3846 return -EINVAL;
3847 }
3848
3849 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
3850
3851 if (!hclge_add_uc_addr(handle, new_addr)) {
3852 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
3853 return 0;
3854 }
3855
3856 return -EIO;
3857 }
3858
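/* Enable or disable hardware VLAN filtering for one filter table
 * (VF table or port table) via the VLAN_FILTER_CTRL command.
 */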
3859 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
3860 bool filter_en)
3861 {
3862 struct hclge_vlan_filter_ctrl_cmd *req;
3863 struct hclge_desc desc;
3864 int ret;
3865
3866 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
3867
3868 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
3869 req->vlan_type = vlan_type;
3870 req->vlan_fe = filter_en;
3871
3872 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3873 if (ret) {
3874 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret = %d.\n",
3875 ret);
3876 return ret;
3877 }
3878
3879 return 0;
3880 }
3881
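/* Program a VLAN filter entry for one VF. The VF bitmap spans two
 * command descriptors of HCLGE_MAX_VF_BYTES (16) bytes each, so up to
 * 256 functions can be addressed: vfid / 8 selects the bitmap byte and
 * 1 << (vfid % 8) the bit within it. For example, vfid 10 sets bit 2
 * of byte 1 in the first descriptor.
 */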
3882 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
3883 bool is_kill, u16 vlan, u8 qos, __be16 proto)
3884 {
3885 #define HCLGE_MAX_VF_BYTES 16
3886 struct hclge_vlan_filter_vf_cfg_cmd *req0;
3887 struct hclge_vlan_filter_vf_cfg_cmd *req1;
3888 struct hclge_desc desc[2];
3889 u8 vf_byte_val;
3890 u8 vf_byte_off;
3891 int ret;
3892
3893 hclge_cmd_setup_basic_desc(&desc[0],
3894 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3895 hclge_cmd_setup_basic_desc(&desc[1],
3896 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3897
3898 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3899
3900 vf_byte_off = vfid / 8;
3901 vf_byte_val = 1 << (vfid % 8);
3902
3903 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
3904 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
3905
3906 req0->vlan_id = cpu_to_le16(vlan);
3907 req0->vlan_cfg = is_kill;
3908
3909 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
3910 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
3911 else
3912 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
3913
3914 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3915 if (ret) {
3916 dev_err(&hdev->pdev->dev,
3917 "Send vf vlan command fail, ret =%d.\n",
3918 ret);
3919 return ret;
3920 }
3921
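/* resp_code 0 means the table write succeeded; when adding, a
 * resp_code of 1 presumably reports that the VLAN entry already
 * exists, so it is treated as success as well.
 */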
3922 if (!is_kill) {
3923 if (!req0->resp_code || req0->resp_code == 1)
3924 return 0;
3925
3926 dev_err(&hdev->pdev->dev,
3927 "Add vf vlan filter fail, ret =%d.\n",
3928 req0->resp_code);
3929 } else {
3930 if (!req0->resp_code)
3931 return 0;
3932
3933 dev_err(&hdev->pdev->dev,
3934 "Kill vf vlan filter fail, ret =%d.\n",
3935 req0->resp_code);
3936 }
3937
3938 return -EIO;
3939 }
3940
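/* Program a port-level VLAN filter entry. The 4096 VLAN IDs are
 * covered in 160-bit windows: vlan_id / 160 selects the window written
 * into vlan_offset, (vlan_id % 160) / 8 the byte of the window bitmap
 * and 1 << (vlan_id % 8) the bit. E.g. vlan_id 500 lands in window 3,
 * byte 2, bit 4. The same VLAN is then mirrored into VF 0's table.
 */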
3941 static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
3942 __be16 proto, u16 vlan_id,
3943 bool is_kill)
3944 {
3945 struct hclge_vport *vport = hclge_get_vport(handle);
3946 struct hclge_dev *hdev = vport->back;
3947 struct hclge_vlan_filter_pf_cfg_cmd *req;
3948 struct hclge_desc desc;
3949 u8 vlan_offset_byte_val;
3950 u8 vlan_offset_byte;
3951 u8 vlan_offset_160;
3952 int ret;
3953
3954 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
3955
3956 vlan_offset_160 = vlan_id / 160;
3957 vlan_offset_byte = (vlan_id % 160) / 8;
3958 vlan_offset_byte_val = 1 << (vlan_id % 8);
3959
3960 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
3961 req->vlan_offset = vlan_offset_160;
3962 req->vlan_cfg = is_kill;
3963 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
3964
3965 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3966 if (ret) {
3967 dev_err(&hdev->pdev->dev,
3968 "port vlan command, send fail, ret =%d.\n",
3969 ret);
3970 return ret;
3971 }
3972
3973 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
3974 if (ret) {
3975 dev_err(&hdev->pdev->dev,
3976 "Set pf vlan filter config fail, ret =%d.\n",
3977 ret);
3978 return -EIO;
3979 }
3980
3981 return 0;
3982 }
3983
3984 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
3985 u16 vlan, u8 qos, __be16 proto)
3986 {
3987 struct hclge_vport *vport = hclge_get_vport(handle);
3988 struct hclge_dev *hdev = vport->back;
3989
3990 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
3991 return -EINVAL;
3992 if (proto != htons(ETH_P_8021Q))
3993 return -EPROTONOSUPPORT;
3994
3995 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
3996 }
3997
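/* Default VLAN setup: turn on filtering for both the VF and the port
 * tables, then install VLAN 0 (effectively covering untagged traffic)
 * for the PF's handle.
 */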
3998 static int hclge_init_vlan_config(struct hclge_dev *hdev)
3999 {
4000 #define HCLGE_VLAN_TYPE_VF_TABLE 0
4001 #define HCLGE_VLAN_TYPE_PORT_TABLE 1
4002 struct hnae3_handle *handle;
4003 int ret;
4004
4005 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
4006 true);
4007 if (ret)
4008 return ret;
4009
4010 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
4011 true);
4012 if (ret)
4013 return ret;
4014
4015 handle = &hdev->vport[0].nic;
4016 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
4017 }
4018
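/* Set the device MTU. The value handed down here is programmed as the
 * hardware maximum frame size and cached in hdev->mps.
 */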
4019 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4020 {
4021 struct hclge_vport *vport = hclge_get_vport(handle);
4022 struct hclge_config_max_frm_size_cmd *req;
4023 struct hclge_dev *hdev = vport->back;
4024 struct hclge_desc desc;
4025 int ret;
4026
4027 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
4028 return -EINVAL;
4029
4030 hdev->mps = new_mtu;
4031 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4032
4033 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
4034 req->max_frm_size = cpu_to_le16(new_mtu);
4035
4036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4037 if (ret) {
4038 dev_err(&hdev->pdev->dev, "set mtu fail, ret = %d.\n", ret);
4039 return ret;
4040 }
4041
4042 return 0;
4043 }
4044
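/* Assert (enable == true) or deassert the soft reset of one TQP
 * (task queue pair) through the RESET_TQP_QUEUE command.
 */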
4045 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
4046 bool enable)
4047 {
4048 struct hclge_reset_tqp_queue_cmd *req;
4049 struct hclge_desc desc;
4050 int ret;
4051
4052 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
4053
4054 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4055 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4056 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
4057
4058 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4059 if (ret) {
4060 dev_err(&hdev->pdev->dev,
4061 "Send tqp reset cmd error, status =%d\n", ret);
4062 return ret;
4063 }
4064
4065 return 0;
4066 }
4067
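/* Read back the RESET_TQP_QUEUE descriptor and return the
 * ready-to-reset bit for the given queue, or a negative error code.
 */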
4068 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
4069 {
4070 struct hclge_reset_tqp_queue_cmd *req;
4071 struct hclge_desc desc;
4072 int ret;
4073
4074 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
4075
4076 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4077 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4078
4079 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4080 if (ret) {
4081 dev_err(&hdev->pdev->dev,
4082 "Get reset status error, status =%d\n", ret);
4083 return ret;
4084 }
4085
4086 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
4087 }
4088
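/* Full TQP reset sequence: disable the queue, assert the soft reset,
 * poll the ready bit up to HCLGE_TQP_RESET_TRY_TIMES times at 20 ms
 * intervals, and finally deassert the reset again.
 */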
4089 static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
4090 {
4091 struct hclge_vport *vport = hclge_get_vport(handle);
4092 struct hclge_dev *hdev = vport->back;
4093 int reset_try_times = 0;
4094 int reset_status = 0;
4095 int ret;
4096
4097 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
4098 if (ret) {
4099 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
4100 return;
4101 }
4102
4103 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
4104 if (ret) {
4105 dev_warn(&hdev->pdev->dev,
4106 "Send reset tqp cmd fail, ret = %d\n", ret);
4107 return;
4108 }
4109
4111 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
4112 /* Wait for tqp hw reset */
4113 msleep(20);
4114 reset_status = hclge_get_reset_status(hdev, queue_id);
4115 if (reset_status)
4116 break;
4117 }
4118
4119 /* reset_try_times overshoots even on a last-try success, so
4119 * judge by the polled status itself
4119 */
4119 if (!reset_status) {
4120 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
4121 return;
4122 }
4123
4124 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
4125 if (ret) {
4126 dev_warn(&hdev->pdev->dev,
4127 "Deassert the soft reset fail, ret = %d\n", ret);
4128 return;
4129 }
4130 }
4131
4132 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
4133 {
4134 struct hclge_vport *vport = hclge_get_vport(handle);
4135 struct hclge_dev *hdev = vport->back;
4136
4137 return hdev->fw_version;
4138 }
4139
4140 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
4141 u32 *rx_en, u32 *tx_en)
4142 {
4143 struct hclge_vport *vport = hclge_get_vport(handle);
4144 struct hclge_dev *hdev = vport->back;
4145
4146 *auto_neg = hclge_get_autoneg(handle);
4147
4148 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4149 *rx_en = 0;
4150 *tx_en = 0;
4151 return;
4152 }
4153
4154 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
4155 *rx_en = 1;
4156 *tx_en = 0;
4157 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
4158 *tx_en = 1;
4159 *rx_en = 0;
4160 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
4161 *rx_en = 1;
4162 *tx_en = 1;
4163 } else {
4164 *rx_en = 0;
4165 *tx_en = 0;
4166 }
4167 }
4168
4169 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
4170 u8 *auto_neg, u32 *speed, u8 *duplex)
4171 {
4172 struct hclge_vport *vport = hclge_get_vport(handle);
4173 struct hclge_dev *hdev = vport->back;
4174
4175 if (speed)
4176 *speed = hdev->hw.mac.speed;
4177 if (duplex)
4178 *duplex = hdev->hw.mac.duplex;
4179 if (auto_neg)
4180 *auto_neg = hdev->hw.mac.autoneg;
4181 }
4182
4183 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
4184 {
4185 struct hclge_vport *vport = hclge_get_vport(handle);
4186 struct hclge_dev *hdev = vport->back;
4187
4188 if (media_type)
4189 *media_type = hdev->hw.mac.media_type;
4190 }
4191
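/* Report the MDI/MDI-X state of a copper PHY. The MDIX control and
 * status bits live on a separate register page, so the page-select
 * register is switched to the MDIX page for the reads and restored to
 * the copper page afterwards.
 */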
4192 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
4193 u8 *tp_mdix_ctrl, u8 *tp_mdix)
4194 {
4195 struct hclge_vport *vport = hclge_get_vport(handle);
4196 struct hclge_dev *hdev = vport->back;
4197 struct phy_device *phydev = hdev->hw.mac.phydev;
4198 int mdix_ctrl, mdix, retval, is_resolved;
4199
4200 if (!phydev) {
4201 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4202 *tp_mdix = ETH_TP_MDI_INVALID;
4203 return;
4204 }
4205
4206 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
4207
4208 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
4209 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
4210 HCLGE_PHY_MDIX_CTRL_S);
4211
4212 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
4213 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
4214 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
4215
4216 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
4217
4218 switch (mdix_ctrl) {
4219 case 0x0:
4220 *tp_mdix_ctrl = ETH_TP_MDI;
4221 break;
4222 case 0x1:
4223 *tp_mdix_ctrl = ETH_TP_MDI_X;
4224 break;
4225 case 0x3:
4226 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
4227 break;
4228 default:
4229 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4230 break;
4231 }
4232
4233 if (!is_resolved)
4234 *tp_mdix = ETH_TP_MDI_INVALID;
4235 else if (mdix)
4236 *tp_mdix = ETH_TP_MDI_X;
4237 else
4238 *tp_mdix = ETH_TP_MDI;
4239 }
4240
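/* Bind a client (KNIC, UNIC or RoCE) to every vport. For a KNIC
 * client, an already-registered RoCE client is initialized as well
 * once the device supports RoCE.
 */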
4241 static int hclge_init_client_instance(struct hnae3_client *client,
4242 struct hnae3_ae_dev *ae_dev)
4243 {
4244 struct hclge_dev *hdev = ae_dev->priv;
4245 struct hclge_vport *vport;
4246 int i, ret;
4247
4248 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4249 vport = &hdev->vport[i];
4250
4251 switch (client->type) {
4252 case HNAE3_CLIENT_KNIC:
4254 hdev->nic_client = client;
4255 vport->nic.client = client;
4256 ret = client->ops->init_instance(&vport->nic);
4257 if (ret)
4258 goto err;
4259
4260 if (hdev->roce_client &&
4261 hnae3_dev_roce_supported(hdev)) {
4262 struct hnae3_client *rc = hdev->roce_client;
4263
4264 ret = hclge_init_roce_base_info(vport);
4265 if (ret)
4266 goto err;
4267
4268 ret = rc->ops->init_instance(&vport->roce);
4269 if (ret)
4270 goto err;
4271 }
4272
4273 break;
4274 case HNAE3_CLIENT_UNIC:
4275 hdev->nic_client = client;
4276 vport->nic.client = client;
4277
4278 ret = client->ops->init_instance(&vport->nic);
4279 if (ret)
4280 goto err;
4281
4282 break;
4283 case HNAE3_CLIENT_ROCE:
4284 if (hnae3_dev_roce_supported(hdev)) {
4285 hdev->roce_client = client;
4286 vport->roce.client = client;
4287 }
4288
4289 if (hdev->roce_client) {
4290 ret = hclge_init_roce_base_info(vport);
4291 if (ret)
4292 goto err;
4293
4294 ret = client->ops->init_instance(&vport->roce);
4295 if (ret)
4296 goto err;
4297 }
4298 }
4299 }
4300
4301 return 0;
4302 err:
4303 return ret;
4304 }
4305
4306 static void hclge_uninit_client_instance(struct hnae3_client *client,
4307 struct hnae3_ae_dev *ae_dev)
4308 {
4309 struct hclge_dev *hdev = ae_dev->priv;
4310 struct hclge_vport *vport;
4311 int i;
4312
4313 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4314 vport = &hdev->vport[i];
4315 if (hdev->roce_client)
4316 hdev->roce_client->ops->uninit_instance(&vport->roce,
4317 0);
4318 if (client->type == HNAE3_CLIENT_ROCE)
4319 return;
4320 if (client->ops->uninit_instance)
4321 client->ops->uninit_instance(&vport->nic, 0);
4322 }
4323 }
4324
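/* Bring up the PCI side of the device: enable it, prefer 64-bit DMA
 * with a 32-bit fallback, claim the regions and map BAR 2, which holds
 * the configuration register space.
 */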
4325 static int hclge_pci_init(struct hclge_dev *hdev)
4326 {
4327 struct pci_dev *pdev = hdev->pdev;
4328 struct hclge_hw *hw;
4329 int ret;
4330
4331 ret = pci_enable_device(pdev);
4332 if (ret) {
4333 dev_err(&pdev->dev, "failed to enable PCI device\n");
4334 goto err_no_drvdata;
4335 }
4336
4337 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4338 if (ret) {
4339 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4340 if (ret) {
4341 dev_err(&pdev->dev,
4342 "can't set consistent PCI DMA");
4343 goto err_disable_device;
4344 }
4345 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4346 }
4347
4348 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4349 if (ret) {
4350 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4351 goto err_disable_device;
4352 }
4353
4354 pci_set_master(pdev);
4355 hw = &hdev->hw;
4356 hw->back = hdev;
4357 hw->io_base = pcim_iomap(pdev, 2, 0);
4358 if (!hw->io_base) {
4359 dev_err(&pdev->dev, "Can't map configuration register space\n");
4360 ret = -ENOMEM;
4361 goto err_clr_master;
4362 }
4363
4364 return 0;
4365 err_clr_master:
4366 pci_clear_master(pdev);
4367 pci_release_regions(pdev);
4368 err_disable_device:
4369 pci_disable_device(pdev);
4370 err_no_drvdata:
4371 pci_set_drvdata(pdev, NULL);
4372
4373 return ret;
4374 }
4375
4376 static void hclge_pci_uninit(struct hclge_dev *hdev)
4377 {
4378 struct pci_dev *pdev = hdev->pdev;
4379
4380 if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
4381 pci_disable_msix(pdev);
4382 devm_kfree(&pdev->dev, hdev->msix_entries);
4383 hdev->msix_entries = NULL;
4384 } else {
4385 pci_disable_msi(pdev);
4386 }
4387
4388 pci_clear_master(pdev);
4389 pci_release_mem_regions(pdev);
4390 pci_disable_device(pdev);
4391 }
4392
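/* Main per-device init path: PCI and command queue first, then
 * capability/configuration queries, interrupt vectors, TQPs, vports,
 * MAC, buffers, TSO, VLAN, scheduling and RSS, and finally the
 * periodic service timer and task.
 */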
4393 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4394 {
4395 struct pci_dev *pdev = ae_dev->pdev;
4396 struct hclge_dev *hdev;
4397 int ret;
4398
4399 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4400 if (!hdev) {
4401 ret = -ENOMEM;
4402 goto err_hclge_dev;
4403 }
4404
4405 hdev->flag |= HCLGE_FLAG_USE_MSIX;
4406 hdev->pdev = pdev;
4407 hdev->ae_dev = ae_dev;
4408 ae_dev->priv = hdev;
4409
4410 ret = hclge_pci_init(hdev);
4411 if (ret) {
4412 dev_err(&pdev->dev, "PCI init failed\n");
4413 goto err_pci_init;
4414 }
4415
4416 /* Initialize the command queue */
4417 ret = hclge_cmd_init(hdev);
4418 if (ret)
4419 goto err_cmd_init;
4420
4421 ret = hclge_get_cap(hdev);
4422 if (ret) {
4423 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4424 ret);
4425 return ret;
4426 }
4427
4428 ret = hclge_configure(hdev);
4429 if (ret) {
4430 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4431 return ret;
4432 }
4433
4434 if (hdev->flag & HCLGE_FLAG_USE_MSIX)
4435 ret = hclge_init_msix(hdev);
4436 else
4437 ret = hclge_init_msi(hdev);
4438 if (ret) {
4439 dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
4440 return ret;
4441 }
4442
4443 ret = hclge_alloc_tqps(hdev);
4444 if (ret) {
4445 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4446 return ret;
4447 }
4448
4449 ret = hclge_alloc_vport(hdev);
4450 if (ret) {
4451 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4452 return ret;
4453 }
4454
4455 ret = hclge_mac_init(hdev);
4456 if (ret) {
4457 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4458 return ret;
4459 }
4460 ret = hclge_buffer_alloc(hdev);
4461 if (ret) {
4462 dev_err(&pdev->dev, "Buffer allocate fail, ret = %d\n", ret);
4463 return ret;
4464 }
4465
4466 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4467 if (ret) {
4468 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
4469 return ret;
4470 }
4471
4472 ret = hclge_init_vlan_config(hdev);
4473 if (ret) {
4474 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
4475 return ret;
4476 }
4477
4478 ret = hclge_tm_schd_init(hdev);
4479 if (ret) {
4480 dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
4481 return ret;
4482 }
4483
4484 ret = hclge_rss_init_hw(hdev);
4485 if (ret) {
4486 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
4487 return ret;
4488 }
4489
4490 hclge_dcb_ops_set(hdev);
4491
4492 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
4493 INIT_WORK(&hdev->service_task, hclge_service_task);
4494
4495 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4496 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4497
4498 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4499 return 0;
4500
4501 err_cmd_init:
4502 pci_release_regions(pdev);
4503 err_pci_init:
4504 pci_set_drvdata(pdev, NULL);
4505 err_hclge_dev:
4506 return ret;
4507 }
4508
4509 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4510 {
4511 struct hclge_dev *hdev = ae_dev->priv;
4512 struct hclge_mac *mac = &hdev->hw.mac;
4513
4514 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4515
4516 if (IS_ENABLED(CONFIG_PCI_IOV))
4517 hclge_disable_sriov(hdev);
4518
4519 if (hdev->service_timer.function)
4520 del_timer_sync(&hdev->service_timer);
4521 if (hdev->service_task.func)
4522 cancel_work_sync(&hdev->service_task);
4523
4524 if (mac->phydev)
4525 mdiobus_unregister(mac->mdio_bus);
4526
4527 hclge_destroy_cmd_queue(&hdev->hw);
4528 hclge_pci_uninit(hdev);
4529 ae_dev->priv = NULL;
4530 }
4531
4532 static const struct hnae3_ae_ops hclge_ops = {
4533 .init_ae_dev = hclge_init_ae_dev,
4534 .uninit_ae_dev = hclge_uninit_ae_dev,
4535 .init_client_instance = hclge_init_client_instance,
4536 .uninit_client_instance = hclge_uninit_client_instance,
4537 .map_ring_to_vector = hclge_map_handle_ring_to_vector,
4538 .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4539 .get_vector = hclge_get_vector,
4540 .set_promisc_mode = hclge_set_promisc_mode,
4541 .set_loopback = hclge_set_loopback,
4542 .start = hclge_ae_start,
4543 .stop = hclge_ae_stop,
4544 .get_status = hclge_get_status,
4545 .get_ksettings_an_result = hclge_get_ksettings_an_result,
4546 .update_speed_duplex_h = hclge_update_speed_duplex_h,
4547 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4548 .get_media_type = hclge_get_media_type,
4549 .get_rss_key_size = hclge_get_rss_key_size,
4550 .get_rss_indir_size = hclge_get_rss_indir_size,
4551 .get_rss = hclge_get_rss,
4552 .set_rss = hclge_set_rss,
4553 .set_rss_tuple = hclge_set_rss_tuple,
4554 .get_rss_tuple = hclge_get_rss_tuple,
4555 .get_tc_size = hclge_get_tc_size,
4556 .get_mac_addr = hclge_get_mac_addr,
4557 .set_mac_addr = hclge_set_mac_addr,
4558 .add_uc_addr = hclge_add_uc_addr,
4559 .rm_uc_addr = hclge_rm_uc_addr,
4560 .add_mc_addr = hclge_add_mc_addr,
4561 .rm_mc_addr = hclge_rm_mc_addr,
4562 .set_autoneg = hclge_set_autoneg,
4563 .get_autoneg = hclge_get_autoneg,
4564 .get_pauseparam = hclge_get_pauseparam,
4565 .set_mtu = hclge_set_mtu,
4566 .reset_queue = hclge_reset_tqp,
4567 .get_stats = hclge_get_stats,
4568 .update_stats = hclge_update_stats,
4569 .get_strings = hclge_get_strings,
4570 .get_sset_count = hclge_get_sset_count,
4571 .get_fw_version = hclge_get_fw_version,
4572 .get_mdix_mode = hclge_get_mdix_mode,
4573 .set_vlan_filter = hclge_set_port_vlan_filter,
4574 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4575 };
4576
4577 static struct hnae3_ae_algo ae_algo = {
4578 .ops = &hclge_ops,
4579 .name = HCLGE_NAME,
4580 .pdev_id_table = ae_algo_pci_tbl,
4581 };
4582
4583 static int hclge_init(void)
4584 {
4585 pr_info("%s is initializing\n", HCLGE_NAME);
4586
4587 return hnae3_register_ae_algo(&ae_algo);
4588 }
4589
4590 static void hclge_exit(void)
4591 {
4592 hnae3_unregister_ae_algo(&ae_algo);
4593 }
4594 module_init(hclge_init);
4595 module_exit(hclge_exit);
4596
4597 MODULE_LICENSE("GPL");
4598 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4599 MODULE_DESCRIPTION("HCLGE Driver");
4600 MODULE_VERSION(HCLGE_MOD_VERSION);