/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

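/* Editorial note on the three *_update_stats() helpers below (not part
 * of the original source): firmware returns statistics as an array of
 * command descriptors.  The head of the first descriptor is consumed
 * by the command header, so it carries fewer data words than the rest,
 * which is why each copy loop special-cases i == 0.  Assuming four
 * 64-bit data words per descriptor, the layout is roughly:
 *
 *	desc[0]: [header][data0][data1][data2]	(DATANUM - 1 words)
 *	desc[1]: [data3][data4][data5][data6]	(DATANUM words)
 *	...
 *
 * The counters are accumulated ("+=") rather than assigned,
 * presumably because the hardware counters are read-and-clear.
 */
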
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		/* use a literal format string so a '%' in a stat name
		 * can never be misread as a conversion specifier
		 */
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
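/* Editorial note (an assumption, not from the original source): 0x7 is
 * taken to cover the three loopback capability bits
 * (HNAE3_SUPPORT_MAC/SERDES/PHY_LOOPBACK), so masking handle->flags
 * against its complement clears all of them at once.
 */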

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check whether the PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* The PF needs both NIC and RoCE vectors; the NIC
		 * vectors are queued before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
			hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

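/* Illustrative MSI vector layout derived above (editorial sketch;
 * the concrete offset value is an assumption taken from
 * HCLGE_ROCE_VECTOR_OFFSET in hclge_main.h, e.g. 96):
 *
 *	vectors 0 .. OFFSET-1:	NIC
 *	vectors OFFSET .. :	RoCE (num_roce_msi of them)
 *
 * so num_msi = num_roce_msi + OFFSET reserves the NIC range in front
 * of the RoCE range.
 */
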
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
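	/* Worked example (editorial): param[2] holds the low 32 MAC
	 * bits and param[3] the high 16; "(high << 31) << 1" is just
	 * "high << 32" split in two to stay clear of 32-bit shift
	 * pitfalls.  E.g. low = 0x33221100 and high = 0x5544 give
	 * mac_addr_tmp = 0x0000554433221100, i.e. 00:11:22:33:44:55
	 * after the per-byte extraction below.
	 */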

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length should be in units of 4 bytes when sent
		 * to the hardware
		 */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
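		/* Illustrative result (editorial; assuming
		 * HCLGE_CFG_RD_LEN_BYTES is 16 and ..._UNIT is 4, per
		 * this era's hclge_main.h): descriptor i reads 4 words
		 * starting at byte offset 16 * i, so desc 1 carries 16
		 * in its OFFSET field and 4 in its RD_LEN field.
		 */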
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
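	/* Worked example (editorial): with num_tqps = 16, rss_size_max
	 * = 8 and 4 TCs, rss_size = min(8, 16 / 4) = 4 and num_tqps
	 * becomes 4 * 4 = 16, i.e. every TC gets an equal, RSS-sized
	 * slice of the vport's queues.
	 */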

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
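	/* Worked example (editorial): 18 TQPs across 4 vports gives
	 * tqp_per_vport = 4 and tqp_main_vport = 4 + 18 % 4 = 6, so
	 * the main vport absorbs the remainder and no queue is left
	 * unassigned.
	 */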

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
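	/* Worked example (editorial): with mps = 1500, 4 active TCs of
	 * which 2 are PFC-enabled, and DCB supported:
	 *	shared_buf_min = 2 * 1500 + HCLGE_DEFAULT_DV
	 *	shared_buf_tc  = 2 * 1500 + (4 - 2) * 1500 / 2 + 1500
	 *		       = 6000
	 * and shared_std is the larger of the two.
	 */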

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: failure
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the number of TCs without PFC that have a private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* clear from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* clear from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

1839 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1840 struct hclge_pkt_buf_alloc *buf_alloc)
1841 {
1842 struct hclge_rx_priv_wl_buf *req;
1843 struct hclge_priv_buf *priv;
1844 struct hclge_desc desc[2];
1845 int i, j;
1846 int ret;
1847
1848 for (i = 0; i < 2; i++) {
1849 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1850 false);
1851 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1852
1853 /* The first descriptor sets the NEXT bit to 1 */
1854 if (i == 0)
1855 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1856 else
1857 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1858
1859 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1860 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1861
1862 priv = &buf_alloc->priv_buf[idx];
1863 req->tc_wl[j].high =
1864 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1865 req->tc_wl[j].high |=
1866 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1867 HCLGE_RX_PRIV_EN_B);
1868 req->tc_wl[j].low =
1869 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1870 req->tc_wl[j].low |=
1871 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1872 HCLGE_RX_PRIV_EN_B);
1873 }
1874 }
1875
1876 /* Send 2 descriptors at one time */
1877 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1878 if (ret) {
1879 dev_err(&hdev->pdev->dev,
1880 "rx private waterline config cmd failed %d\n",
1881 ret);
1882 return ret;
1883 }
1884 return 0;
1885 }
1886
1887 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1888 struct hclge_pkt_buf_alloc *buf_alloc)
1889 {
1890 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1891 struct hclge_rx_com_thrd *req;
1892 struct hclge_desc desc[2];
1893 struct hclge_tc_thrd *tc;
1894 int i, j;
1895 int ret;
1896
1897 for (i = 0; i < 2; i++) {
1898 hclge_cmd_setup_basic_desc(&desc[i],
1899 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1900 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1901
1902 /* The first descriptor sets the NEXT bit to 1 */
1903 if (i == 0)
1904 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1905 else
1906 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1907
1908 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1909 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1910
1911 req->com_thrd[j].high =
1912 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1913 req->com_thrd[j].high |=
1914 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1915 HCLGE_RX_PRIV_EN_B);
1916 req->com_thrd[j].low =
1917 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1918 req->com_thrd[j].low |=
1919 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1920 HCLGE_RX_PRIV_EN_B);
1921 }
1922 }
1923
1924 /* Send 2 descriptors at one time */
1925 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1926 if (ret) {
1927 dev_err(&hdev->pdev->dev,
1928 "common threshold config cmd failed %d\n", ret);
1929 return ret;
1930 }
1931 return 0;
1932 }
1933
1934 static int hclge_common_wl_config(struct hclge_dev *hdev,
1935 struct hclge_pkt_buf_alloc *buf_alloc)
1936 {
1937 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1938 struct hclge_rx_com_wl *req;
1939 struct hclge_desc desc;
1940 int ret;
1941
1942 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1943
1944 req = (struct hclge_rx_com_wl *)desc.data;
1945 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1946 req->com_wl.high |=
1947 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1948 HCLGE_RX_PRIV_EN_B);
1949
1950 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1951 req->com_wl.low |=
1952 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1953 HCLGE_RX_PRIV_EN_B);
1954
1955 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1956 if (ret) {
1957 dev_err(&hdev->pdev->dev,
1958 "common waterline config cmd failed %d\n", ret);
1959 return ret;
1960 }
1961
1962 return 0;
1963 }
1964
1965 int hclge_buffer_alloc(struct hclge_dev *hdev)
1966 {
1967 struct hclge_pkt_buf_alloc *pkt_buf;
1968 int ret;
1969
1970 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1971 if (!pkt_buf)
1972 return -ENOMEM;
1973
1974 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1975 if (ret) {
1976 dev_err(&hdev->pdev->dev,
1977 "could not calc tx buffer size for all TCs %d\n", ret);
1978 goto out;
1979 }
1980
1981 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1982 if (ret) {
1983 dev_err(&hdev->pdev->dev,
1984 "could not alloc tx buffers %d\n", ret);
1985 goto out;
1986 }
1987
1988 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1989 if (ret) {
1990 dev_err(&hdev->pdev->dev,
1991 "could not calc rx priv buffer size for all TCs %d\n",
1992 ret);
1993 goto out;
1994 }
1995
1996 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1997 if (ret) {
1998 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1999 ret);
2000 goto out;
2001 }
2002
2003 if (hnae3_dev_dcb_supported(hdev)) {
2004 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2005 if (ret) {
2006 dev_err(&hdev->pdev->dev,
2007 "could not configure rx private waterline %d\n",
2008 ret);
2009 goto out;
2010 }
2011
2012 ret = hclge_common_thrd_config(hdev, pkt_buf);
2013 if (ret) {
2014 dev_err(&hdev->pdev->dev,
2015 "could not configure common threshold %d\n",
2016 ret);
2017 goto out;
2018 }
2019 }
2020
2021 ret = hclge_common_wl_config(hdev, pkt_buf);
2022 if (ret)
2023 dev_err(&hdev->pdev->dev,
2024 "could not configure common waterline %d\n", ret);
2025
2026 out:
2027 kfree(pkt_buf);
2028 return ret;
2029 }
2030
2031 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2032 {
2033 struct hnae3_handle *roce = &vport->roce;
2034 struct hnae3_handle *nic = &vport->nic;
2035
2036 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2037
2038 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2039 vport->back->num_msi_left == 0)
2040 return -EINVAL;
2041
2042 roce->rinfo.base_vector = vport->back->roce_base_vector;
2043
2044 roce->rinfo.netdev = nic->kinfo.netdev;
2045 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2046
2047 roce->pdev = nic->pdev;
2048 roce->ae_algo = nic->ae_algo;
2049 roce->numa_node_mask = nic->numa_node_mask;
2050
2051 return 0;
2052 }
2053
2054 static int hclge_init_msi(struct hclge_dev *hdev)
2055 {
2056 struct pci_dev *pdev = hdev->pdev;
2057 int vectors;
2058 int i;
2059
2060 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2061 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2062 if (vectors < 0) {
2063 dev_err(&pdev->dev,
2064 "failed(%d) to allocate MSI/MSI-X vectors\n",
2065 vectors);
2066 return vectors;
2067 }
2068 if (vectors < hdev->num_msi)
2069 dev_warn(&hdev->pdev->dev,
2070 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2071 hdev->num_msi, vectors);
2072
2073 hdev->num_msi = vectors;
2074 hdev->num_msi_left = vectors;
2075 hdev->base_msi_vector = pdev->irq;
2076 hdev->roce_base_vector = hdev->base_msi_vector +
2077 HCLGE_ROCE_VECTOR_OFFSET;
2078
2079 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2080 sizeof(u16), GFP_KERNEL);
2081 if (!hdev->vector_status) {
2082 pci_free_irq_vectors(pdev);
2083 return -ENOMEM;
2084 }
2085
2086 for (i = 0; i < hdev->num_msi; i++)
2087 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2088
2089 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2090 sizeof(int), GFP_KERNEL);
2091 if (!hdev->vector_irq) {
2092 pci_free_irq_vectors(pdev);
2093 return -ENOMEM;
2094 }
2095
2096 return 0;
2097 }
2098
2099 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2100 {
2101 struct hclge_mac *mac = &hdev->hw.mac;
2102
2103 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2104 mac->duplex = (u8)duplex;
2105 else
2106 mac->duplex = HCLGE_MAC_FULL;
2107
2108 mac->speed = speed;
2109 }
2110
2111 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2112 {
2113 struct hclge_config_mac_speed_dup_cmd *req;
2114 struct hclge_desc desc;
2115 int ret;
2116
2117 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2118
2119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2120
2121 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2122
2123 switch (speed) {
2124 case HCLGE_MAC_SPEED_10M:
2125 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2126 HCLGE_CFG_SPEED_S, 6);
2127 break;
2128 case HCLGE_MAC_SPEED_100M:
2129 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2130 HCLGE_CFG_SPEED_S, 7);
2131 break;
2132 case HCLGE_MAC_SPEED_1G:
2133 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2134 HCLGE_CFG_SPEED_S, 0);
2135 break;
2136 case HCLGE_MAC_SPEED_10G:
2137 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2138 HCLGE_CFG_SPEED_S, 1);
2139 break;
2140 case HCLGE_MAC_SPEED_25G:
2141 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2142 HCLGE_CFG_SPEED_S, 2);
2143 break;
2144 case HCLGE_MAC_SPEED_40G:
2145 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2146 HCLGE_CFG_SPEED_S, 3);
2147 break;
2148 case HCLGE_MAC_SPEED_50G:
2149 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2150 HCLGE_CFG_SPEED_S, 4);
2151 break;
2152 case HCLGE_MAC_SPEED_100G:
2153 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2154 HCLGE_CFG_SPEED_S, 5);
2155 break;
2156 default:
2157 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2158 return -EINVAL;
2159 }
2160
2161 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2162 1);
2163
2164 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2165 if (ret) {
2166 dev_err(&hdev->pdev->dev,
2167 "mac speed/duplex config cmd failed %d.\n", ret);
2168 return ret;
2169 }
2170
2171 hclge_check_speed_dup(hdev, duplex, speed);
2172
2173 return 0;
2174 }
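
/* Editorial sketch (illustrative only, not part of the driver): the
 * switch above maps MAC speeds onto the firmware's speed code. The same
 * mapping restated as a single helper, for reference:
 */
static int example_speed_to_fw_code(int speed)
{
	switch (speed) {
	case HCLGE_MAC_SPEED_1G:	return 0;
	case HCLGE_MAC_SPEED_10G:	return 1;
	case HCLGE_MAC_SPEED_25G:	return 2;
	case HCLGE_MAC_SPEED_40G:	return 3;
	case HCLGE_MAC_SPEED_50G:	return 4;
	case HCLGE_MAC_SPEED_100G:	return 5;
	case HCLGE_MAC_SPEED_10M:	return 6;
	case HCLGE_MAC_SPEED_100M:	return 7;
	default:			return -EINVAL;
	}
}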
2175
2176 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2177 u8 duplex)
2178 {
2179 struct hclge_vport *vport = hclge_get_vport(handle);
2180 struct hclge_dev *hdev = vport->back;
2181
2182 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2183 }
2184
2185 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2186 u8 *duplex)
2187 {
2188 struct hclge_query_an_speed_dup_cmd *req;
2189 struct hclge_desc desc;
2190 int speed_tmp;
2191 int ret;
2192
2193 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2194
2195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2197 if (ret) {
2198 dev_err(&hdev->pdev->dev,
2199 "mac speed/autoneg/duplex query cmd failed %d\n",
2200 ret);
2201 return ret;
2202 }
2203
2204 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2205 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2206 HCLGE_QUERY_SPEED_S);
2207
2208 ret = hclge_parse_speed(speed_tmp, speed);
2209 if (ret) {
2210 dev_err(&hdev->pdev->dev,
2211 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2212 return -EIO;
2213 }
2214
2215 return 0;
2216 }
2217
2218 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2219 {
2220 struct hclge_config_auto_neg_cmd *req;
2221 struct hclge_desc desc;
2222 u32 flag = 0;
2223 int ret;
2224
2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2226
2227 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2228 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2229 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2230
2231 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2232 if (ret) {
2233 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2234 ret);
2235 return ret;
2236 }
2237
2238 return 0;
2239 }
2240
2241 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2242 {
2243 struct hclge_vport *vport = hclge_get_vport(handle);
2244 struct hclge_dev *hdev = vport->back;
2245
2246 return hclge_set_autoneg_en(hdev, enable);
2247 }
2248
2249 static int hclge_get_autoneg(struct hnae3_handle *handle)
2250 {
2251 struct hclge_vport *vport = hclge_get_vport(handle);
2252 struct hclge_dev *hdev = vport->back;
2253 struct phy_device *phydev = hdev->hw.mac.phydev;
2254
2255 if (phydev)
2256 return phydev->autoneg;
2257
2258 return hdev->hw.mac.autoneg;
2259 }
2260
2261 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2262 bool mask_vlan,
2263 u8 *mac_mask)
2264 {
2265 struct hclge_mac_vlan_mask_entry_cmd *req;
2266 struct hclge_desc desc;
2267 int status;
2268
2269 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
2270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
2271
2272 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
2273 mask_vlan ? 1 : 0);
2274 ether_addr_copy(req->mac_mask, mac_mask);
2275
2276 status = hclge_cmd_send(&hdev->hw, &desc, 1);
2277 if (status)
2278 dev_err(&hdev->pdev->dev,
2279 "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
2280 status);
2281
2282 return status;
2283 }
2284
2285 static int hclge_mac_init(struct hclge_dev *hdev)
2286 {
2287 struct hnae3_handle *handle = &hdev->vport[0].nic;
2288 struct net_device *netdev = handle->kinfo.netdev;
2289 struct hclge_mac *mac = &hdev->hw.mac;
2290 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2291 int mtu;
2292 int ret;
2293
2294 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2295 if (ret) {
2296 dev_err(&hdev->pdev->dev,
2297 "Config mac speed dup fail ret=%d\n", ret);
2298 return ret;
2299 }
2300
2301 mac->link = 0;
2302
2303 /* Initialize the MTA table work mode */
2304 hdev->accept_mta_mc = true;
2305 hdev->enable_mta = true;
2306 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2307
2308 ret = hclge_set_mta_filter_mode(hdev,
2309 hdev->mta_mac_sel_type,
2310 hdev->enable_mta);
2311 if (ret) {
2312 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2313 ret);
2314 return ret;
2315 }
2316
2317 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2318 if (ret) {
2319 dev_err(&hdev->pdev->dev,
2320 "set mta filter mode fail ret=%d\n", ret);
2321 return ret;
2322 }
2323
2324 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2325 if (ret) {
2326 dev_err(&hdev->pdev->dev,
2327 "set default mac_vlan_mask fail ret=%d\n", ret);
2328 return ret;
2329 }
2330
2331 if (netdev)
2332 mtu = netdev->mtu;
2333 else
2334 mtu = ETH_DATA_LEN;
2335
2336 ret = hclge_set_mtu(handle, mtu);
2337 if (ret) {
2338 dev_err(&hdev->pdev->dev,
2339 "set mtu failed ret=%d\n", ret);
2340 return ret;
2341 }
2342
2343 return 0;
2344 }
2345
2346 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2347 {
2348 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2349 schedule_work(&hdev->mbx_service_task);
2350 }
2351
2352 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2353 {
2354 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2355 schedule_work(&hdev->rst_service_task);
2356 }
2357
2358 static void hclge_task_schedule(struct hclge_dev *hdev)
2359 {
2360 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2361 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2362 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2363 (void)schedule_work(&hdev->service_task);
2364 }
2365
2366 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2367 {
2368 struct hclge_link_status_cmd *req;
2369 struct hclge_desc desc;
2370 int link_status;
2371 int ret;
2372
2373 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2374 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2375 if (ret) {
2376 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2377 ret);
2378 return ret;
2379 }
2380
2381 req = (struct hclge_link_status_cmd *)desc.data;
2382 link_status = req->status & HCLGE_LINK_STATUS;
2383
2384 return !!link_status;
2385 }
2386
2387 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2388 {
2389 int mac_state;
2390 int link_stat;
2391
2392 mac_state = hclge_get_mac_link_status(hdev);
2393
2394 if (hdev->hw.mac.phydev) {
2395 if (!genphy_read_status(hdev->hw.mac.phydev))
2396 link_stat = mac_state &
2397 hdev->hw.mac.phydev->link;
2398 else
2399 link_stat = 0;
2400
2401 } else {
2402 link_stat = mac_state;
2403 }
2404
2405 return !!link_stat;
2406 }
2407
2408 static void hclge_update_link_status(struct hclge_dev *hdev)
2409 {
2410 struct hnae3_client *client = hdev->nic_client;
2411 struct hnae3_handle *handle;
2412 int state;
2413 int i;
2414
2415 if (!client)
2416 return;
2417 state = hclge_get_mac_phy_link(hdev);
2418 if (state != hdev->hw.mac.link) {
2419 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2420 handle = &hdev->vport[i].nic;
2421 client->ops->link_status_change(handle, state);
2422 }
2423 hdev->hw.mac.link = state;
2424 }
2425 }
2426
2427 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2428 {
2429 struct hclge_mac mac = hdev->hw.mac;
2430 u8 duplex;
2431 int speed;
2432 int ret;
2433
2434 /* get the speed and duplex as the autoneg result from the mac cmd
2435 * when no phy exists.
2436 */
2437 if (mac.phydev || !mac.autoneg)
2438 return 0;
2439
2440 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2441 if (ret) {
2442 dev_err(&hdev->pdev->dev,
2443 "mac autoneg/speed/duplex query failed %d\n", ret);
2444 return ret;
2445 }
2446
2447 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2448 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2449 if (ret) {
2450 dev_err(&hdev->pdev->dev,
2451 "mac speed/duplex config failed %d\n", ret);
2452 return ret;
2453 }
2454 }
2455
2456 return 0;
2457 }
2458
2459 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2460 {
2461 struct hclge_vport *vport = hclge_get_vport(handle);
2462 struct hclge_dev *hdev = vport->back;
2463
2464 return hclge_update_speed_duplex(hdev);
2465 }
2466
2467 static int hclge_get_status(struct hnae3_handle *handle)
2468 {
2469 struct hclge_vport *vport = hclge_get_vport(handle);
2470 struct hclge_dev *hdev = vport->back;
2471
2472 hclge_update_link_status(hdev);
2473
2474 return hdev->hw.mac.link;
2475 }
2476
2477 static void hclge_service_timer(struct timer_list *t)
2478 {
2479 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2480
2481 mod_timer(&hdev->service_timer, jiffies + HZ);
2482 hdev->hw_stats.stats_timer++;
2483 hclge_task_schedule(hdev);
2484 }
2485
2486 static void hclge_service_complete(struct hclge_dev *hdev)
2487 {
2488 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2489
2490 /* Flush memory before next watchdog */
2491 smp_mb__before_atomic();
2492 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2493 }
2494
2495 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2496 {
2497 u32 rst_src_reg;
2498 u32 cmdq_src_reg;
2499
2500 /* fetch the events from their corresponding regs */
2501 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
2502 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2503
2504 /* Assumption: if reset and mailbox events happen to be reported
2505 * together, we only process the reset event in this pass and defer
2506 * the mailbox events. Since we will not have cleared the RX CMDQ
2507 * event this time, hardware will raise another interrupt just for
2508 * the mailbox.
2509 */
2510
2511 /* check for vector0 reset event sources */
2512 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2513 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2514 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2515 return HCLGE_VECTOR0_EVENT_RST;
2516 }
2517
2518 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2519 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2520 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2521 return HCLGE_VECTOR0_EVENT_RST;
2522 }
2523
2524 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2525 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2526 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2527 return HCLGE_VECTOR0_EVENT_RST;
2528 }
2529
2530 /* check for vector0 mailbox(=CMDQ RX) event source */
2531 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2532 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2533 *clearval = cmdq_src_reg;
2534 return HCLGE_VECTOR0_EVENT_MBX;
2535 }
2536
2537 return HCLGE_VECTOR0_EVENT_OTHER;
2538 }
2539
2540 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2541 u32 regclr)
2542 {
2543 switch (event_type) {
2544 case HCLGE_VECTOR0_EVENT_RST:
2545 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2546 break;
2547 case HCLGE_VECTOR0_EVENT_MBX:
2548 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2549 break;
2550 }
2551 }
2552
2553 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2554 {
2555 writel(enable ? 1 : 0, vector->addr);
2556 }
2557
2558 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2559 {
2560 struct hclge_dev *hdev = data;
2561 u32 event_cause;
2562 u32 clearval;
2563
2564 hclge_enable_vector(&hdev->misc_vector, false);
2565 event_cause = hclge_check_event_cause(hdev, &clearval);
2566
2567 /* vector 0 interrupt is shared with reset and mailbox source events. */
2568 switch (event_cause) {
2569 case HCLGE_VECTOR0_EVENT_RST:
2570 hclge_reset_task_schedule(hdev);
2571 break;
2572 case HCLGE_VECTOR0_EVENT_MBX:
2573 /* If we are here then,
2574 * 1. Either we are not handling any mbx task and we are not
2575 * scheduled as well
2576 * OR
2577 * 2. We could be handling a mbx task but nothing more is
2578 * scheduled.
2579 * In both cases, we should schedule mbx task as there are more
2580 * mbx messages reported by this interrupt.
2581 */
2582 hclge_mbx_task_schedule(hdev);
break;
2583
2584 default:
2585 dev_dbg(&hdev->pdev->dev,
2586 "received unknown or unhandled event of vector0\n");
2587 break;
2588 }
2589
2590 /* we should clear the source of interrupt */
2591 hclge_clear_event_cause(hdev, event_cause, clearval);
2592 hclge_enable_vector(&hdev->misc_vector, true);
2593
2594 return IRQ_HANDLED;
2595 }
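
/* Editorial note: the handler above follows a mask -> identify ->
 * schedule -> ack -> unmask pattern. Reset events deliberately win over
 * mailbox events; when both fire together only the reset source is
 * acked, so hardware re-raises vector 0 for the still-pending mailbox
 * work (see the assumption spelled out in hclge_check_event_cause()).
 */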
2596
2597 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2598 {
2599 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2600 hdev->num_msi_left += 1;
2601 hdev->num_msi_used -= 1;
2602 }
2603
2604 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2605 {
2606 struct hclge_misc_vector *vector = &hdev->misc_vector;
2607
2608 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2609
2610 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2611 hdev->vector_status[0] = 0;
2612
2613 hdev->num_msi_left -= 1;
2614 hdev->num_msi_used += 1;
2615 }
2616
2617 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2618 {
2619 int ret;
2620
2621 hclge_get_misc_vector(hdev);
2622
2623 /* this is freed explicitly in hclge_misc_irq_uninit() */
2624 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2625 0, "hclge_misc", hdev);
2626 if (ret) {
2627 hclge_free_vector(hdev, 0);
2628 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2629 hdev->misc_vector.vector_irq);
2630 }
2631
2632 return ret;
2633 }
2634
2635 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2636 {
2637 free_irq(hdev->misc_vector.vector_irq, hdev);
2638 hclge_free_vector(hdev, 0);
2639 }
2640
2641 static int hclge_notify_client(struct hclge_dev *hdev,
2642 enum hnae3_reset_notify_type type)
2643 {
2644 struct hnae3_client *client = hdev->nic_client;
2645 u16 i;
2646
2647 if (!client->ops->reset_notify)
2648 return -EOPNOTSUPP;
2649
2650 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2651 struct hnae3_handle *handle = &hdev->vport[i].nic;
2652 int ret;
2653
2654 ret = client->ops->reset_notify(handle, type);
2655 if (ret)
2656 return ret;
2657 }
2658
2659 return 0;
2660 }
2661
2662 static int hclge_reset_wait(struct hclge_dev *hdev)
2663 {
2664 #define HCLGE_RESET_WATI_MS 100
2665 #define HCLGE_RESET_WAIT_CNT 5
2666 u32 val, reg, reg_bit;
2667 u32 cnt = 0;
2668
2669 switch (hdev->reset_type) {
2670 case HNAE3_GLOBAL_RESET:
2671 reg = HCLGE_GLOBAL_RESET_REG;
2672 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2673 break;
2674 case HNAE3_CORE_RESET:
2675 reg = HCLGE_GLOBAL_RESET_REG;
2676 reg_bit = HCLGE_CORE_RESET_BIT;
2677 break;
2678 case HNAE3_FUNC_RESET:
2679 reg = HCLGE_FUN_RST_ING;
2680 reg_bit = HCLGE_FUN_RST_ING_B;
2681 break;
2682 default:
2683 dev_err(&hdev->pdev->dev,
2684 "Wait for unsupported reset type: %d\n",
2685 hdev->reset_type);
2686 return -EINVAL;
2687 }
2688
2689 val = hclge_read_dev(&hdev->hw, reg);
2690 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2691 msleep(HCLGE_RESET_WATI_MS);
2692 val = hclge_read_dev(&hdev->hw, reg);
2693 cnt++;
2694 }
2695
2696 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2697 dev_warn(&hdev->pdev->dev,
2698 "Wait for reset timeout: %d\n", hdev->reset_type);
2699 return -EBUSY;
2700 }
2701
2702 return 0;
2703 }
2704
2705 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2706 {
2707 struct hclge_desc desc;
2708 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2709 int ret;
2710
2711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2712 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
2713 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2714 req->fun_reset_vfid = func_id;
2715
2716 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2717 if (ret)
2718 dev_err(&hdev->pdev->dev,
2719 "send function reset cmd fail, status =%d\n", ret);
2720
2721 return ret;
2722 }
2723
2724 static void hclge_do_reset(struct hclge_dev *hdev)
2725 {
2726 struct pci_dev *pdev = hdev->pdev;
2727 u32 val;
2728
2729 switch (hdev->reset_type) {
2730 case HNAE3_GLOBAL_RESET:
2731 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2732 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2733 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2734 dev_info(&pdev->dev, "Global Reset requested\n");
2735 break;
2736 case HNAE3_CORE_RESET:
2737 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2738 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2739 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2740 dev_info(&pdev->dev, "Core Reset requested\n");
2741 break;
2742 case HNAE3_FUNC_RESET:
2743 dev_info(&pdev->dev, "PF Reset requested\n");
2744 hclge_func_reset_cmd(hdev, 0);
2745 /* schedule again to check later */
2746 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2747 hclge_reset_task_schedule(hdev);
2748 break;
2749 default:
2750 dev_warn(&pdev->dev,
2751 "Unsupported reset type: %d\n", hdev->reset_type);
2752 break;
2753 }
2754 }
2755
2756 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2757 unsigned long *addr)
2758 {
2759 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2760
2761 /* return the highest priority reset level amongst all */
2762 if (test_bit(HNAE3_GLOBAL_RESET, addr))
2763 rst_level = HNAE3_GLOBAL_RESET;
2764 else if (test_bit(HNAE3_CORE_RESET, addr))
2765 rst_level = HNAE3_CORE_RESET;
2766 else if (test_bit(HNAE3_IMP_RESET, addr))
2767 rst_level = HNAE3_IMP_RESET;
2768 else if (test_bit(HNAE3_FUNC_RESET, addr))
2769 rst_level = HNAE3_FUNC_RESET;
2770
2771 /* now, clear all the recorded reset bits */
2772 clear_bit(HNAE3_GLOBAL_RESET, addr);
2773 clear_bit(HNAE3_CORE_RESET, addr);
2774 clear_bit(HNAE3_IMP_RESET, addr);
2775 clear_bit(HNAE3_FUNC_RESET, addr);
2776
2777 return rst_level;
2778 }
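
/* Editorial sketch (illustrative only): hclge_get_reset_level() both
 * picks the highest-priority pending reset and clears the whole set, so
 * lesser requests are absorbed by the bigger reset:
 */
static void example_reset_level(struct hclge_dev *hdev)
{
	unsigned long pending = 0;

	set_bit(HNAE3_FUNC_RESET, &pending);
	set_bit(HNAE3_GLOBAL_RESET, &pending);

	/* returns HNAE3_GLOBAL_RESET and leaves pending empty */
	WARN_ON(hclge_get_reset_level(hdev, &pending) != HNAE3_GLOBAL_RESET);
	WARN_ON(pending != 0);
}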
2779
2780 static void hclge_reset(struct hclge_dev *hdev)
2781 {
2782 /* perform reset of the stack & ae device for a client */
2783
2784 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2785
2786 if (!hclge_reset_wait(hdev)) {
2787 rtnl_lock();
2788 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2789 hclge_reset_ae_dev(hdev->ae_dev);
2790 hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2791 rtnl_unlock();
2792 } else {
2793 /* schedule again to check pending resets later */
2794 set_bit(hdev->reset_type, &hdev->reset_pending);
2795 hclge_reset_task_schedule(hdev);
2796 }
2797
2798 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2799 }
2800
2801 static void hclge_reset_event(struct hnae3_handle *handle)
2802 {
2803 struct hclge_vport *vport = hclge_get_vport(handle);
2804 struct hclge_dev *hdev = vport->back;
2805
2806 /* check if this is a new reset request, and that we are not here just
2807 * because a previous reset attempt did not succeed and the watchdog hit
2808 * us again. We know it is a new request if the last reset request did
2809 * not occur very recently (watchdog timer = 5*HZ, so check after a
2810 * sufficiently large time, say 4*5*HZ). For a new request we reset the
2811 * "reset level" to PF reset.
2812 */
2812 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
2813 handle->reset_level = HNAE3_FUNC_RESET;
2814
2815 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
2816 handle->reset_level);
2817
2818 /* request reset & schedule reset task */
2819 set_bit(handle->reset_level, &hdev->reset_request);
2820 hclge_reset_task_schedule(hdev);
2821
2822 if (handle->reset_level < HNAE3_GLOBAL_RESET)
2823 handle->reset_level++;
2824
2825 handle->last_reset_time = jiffies;
2826 }
2827
2828 static void hclge_reset_subtask(struct hclge_dev *hdev)
2829 {
2830 /* check if there is any ongoing reset in the hardware. This status
2831 * can be checked from reset_pending. If there is, we need to wait
2832 * for the hardware to complete the reset.
2833 * a. If we are able to figure out in reasonable time that the
2834 *    hardware has fully reset, we can proceed with the driver and
2835 *    client reset.
2836 * b. else, we can come back later to check this status, so
2837 *    re-schedule now.
2838 */
2839 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2840 if (hdev->reset_type != HNAE3_NONE_RESET)
2841 hclge_reset(hdev);
2842
2843 /* check if we got any *new* reset requests to be honored */
2844 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2845 if (hdev->reset_type != HNAE3_NONE_RESET)
2846 hclge_do_reset(hdev);
2847
2848 hdev->reset_type = HNAE3_NONE_RESET;
2849 }
2850
2851 static void hclge_reset_service_task(struct work_struct *work)
2852 {
2853 struct hclge_dev *hdev =
2854 container_of(work, struct hclge_dev, rst_service_task);
2855
2856 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2857 return;
2858
2859 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2860
2861 hclge_reset_subtask(hdev);
2862
2863 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2864 }
2865
2866 static void hclge_mailbox_service_task(struct work_struct *work)
2867 {
2868 struct hclge_dev *hdev =
2869 container_of(work, struct hclge_dev, mbx_service_task);
2870
2871 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2872 return;
2873
2874 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2875
2876 hclge_mbx_handler(hdev);
2877
2878 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2879 }
2880
2881 static void hclge_service_task(struct work_struct *work)
2882 {
2883 struct hclge_dev *hdev =
2884 container_of(work, struct hclge_dev, service_task);
2885
2886 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2887 hclge_update_stats_for_all(hdev);
2888 hdev->hw_stats.stats_timer = 0;
2889 }
2890
2891 hclge_update_speed_duplex(hdev);
2892 hclge_update_link_status(hdev);
2893 hclge_service_complete(hdev);
2894 }
2895
2896 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2897 {
2898 /* VF handle has no client */
2899 if (!handle->client)
2900 return container_of(handle, struct hclge_vport, nic);
2901 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2902 return container_of(handle, struct hclge_vport, roce);
2903 else
2904 return container_of(handle, struct hclge_vport, nic);
2905 }
2906
2907 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2908 struct hnae3_vector_info *vector_info)
2909 {
2910 struct hclge_vport *vport = hclge_get_vport(handle);
2911 struct hnae3_vector_info *vector = vector_info;
2912 struct hclge_dev *hdev = vport->back;
2913 int alloc = 0;
2914 int i, j;
2915
2916 vector_num = min(hdev->num_msi_left, vector_num);
2917
2918 for (j = 0; j < vector_num; j++) {
2919 for (i = 1; i < hdev->num_msi; i++) {
2920 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2921 vector->vector = pci_irq_vector(hdev->pdev, i);
2922 vector->io_addr = hdev->hw.io_base +
2923 HCLGE_VECTOR_REG_BASE +
2924 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2925 vport->vport_id *
2926 HCLGE_VECTOR_VF_OFFSET;
2927 hdev->vector_status[i] = vport->vport_id;
2928 hdev->vector_irq[i] = vector->vector;
2929
2930 vector++;
2931 alloc++;
2932
2933 break;
2934 }
2935 }
2936 }
2937 hdev->num_msi_left -= alloc;
2938 hdev->num_msi_used += alloc;
2939
2940 return alloc;
2941 }
2942
2943 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2944 {
2945 int i;
2946
2947 for (i = 0; i < hdev->num_msi; i++)
2948 if (vector == hdev->vector_irq[i])
2949 return i;
2950
2951 return -EINVAL;
2952 }
2953
2954 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
2955 {
2956 struct hclge_vport *vport = hclge_get_vport(handle);
2957 struct hclge_dev *hdev = vport->back;
2958 int vector_id;
2959
2960 vector_id = hclge_get_vector_index(hdev, vector);
2961 if (vector_id < 0) {
2962 dev_err(&hdev->pdev->dev,
2963 "Get vector index fail. vector_id =%d\n", vector_id);
2964 return vector_id;
2965 }
2966
2967 hclge_free_vector(hdev, vector_id);
2968
2969 return 0;
2970 }
2971
2972 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2973 {
2974 return HCLGE_RSS_KEY_SIZE;
2975 }
2976
2977 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2978 {
2979 return HCLGE_RSS_IND_TBL_SIZE;
2980 }
2981
2982 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2983 const u8 hfunc, const u8 *key)
2984 {
2985 struct hclge_rss_config_cmd *req;
2986 struct hclge_desc desc;
2987 int key_offset;
2988 int key_size;
2989 int ret;
2990
2991 req = (struct hclge_rss_config_cmd *)desc.data;
2992
2993 for (key_offset = 0; key_offset < 3; key_offset++) {
2994 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2995 false);
2996
2997 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2998 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2999
3000 if (key_offset == 2)
3001 key_size =
3002 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3003 else
3004 key_size = HCLGE_RSS_HASH_KEY_NUM;
3005
3006 memcpy(req->hash_key,
3007 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3008
3009 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3010 if (ret) {
3011 dev_err(&hdev->pdev->dev,
3012 "Configure RSS config fail, status = %d\n",
3013 ret);
3014 return ret;
3015 }
3016 }
3017 return 0;
3018 }
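
/* Editorial note: the loop above spreads the RSS key across three
 * descriptors. Assuming the driver's usual sizes from hclge_main.h
 * (HCLGE_RSS_KEY_SIZE == 40, HCLGE_RSS_HASH_KEY_NUM == 16 - stated here
 * as an assumption, not restated from this file), the chunks are:
 *	key_offset 0: key[0..15]  (16 bytes)
 *	key_offset 1: key[16..31] (16 bytes)
 *	key_offset 2: key[32..39] (8 bytes = 40 - 16 * 2)
 */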
3019
3020 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3021 {
3022 struct hclge_rss_indirection_table_cmd *req;
3023 struct hclge_desc desc;
3024 int i, j;
3025 int ret;
3026
3027 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3028
3029 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3030 hclge_cmd_setup_basic_desc
3031 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3032
3033 req->start_table_index =
3034 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3035 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3036
3037 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3038 req->rss_result[j] =
3039 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3040
3041 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3042 if (ret) {
3043 dev_err(&hdev->pdev->dev,
3044 "Configure rss indir table fail,status = %d\n",
3045 ret);
3046 return ret;
3047 }
3048 }
3049 return 0;
3050 }
3051
3052 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3053 u16 *tc_size, u16 *tc_offset)
3054 {
3055 struct hclge_rss_tc_mode_cmd *req;
3056 struct hclge_desc desc;
3057 int ret;
3058 int i;
3059
3060 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3061 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3062
3063 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3064 u16 mode = 0;
3065
3066 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3067 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3068 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3069 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3070 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3071
3072 req->rss_tc_mode[i] = cpu_to_le16(mode);
3073 }
3074
3075 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3076 if (ret) {
3077 dev_err(&hdev->pdev->dev,
3078 "Configure rss tc mode fail, status = %d\n", ret);
3079 return ret;
3080 }
3081
3082 return 0;
3083 }
3084
3085 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3086 {
3087 struct hclge_rss_input_tuple_cmd *req;
3088 struct hclge_desc desc;
3089 int ret;
3090
3091 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3092
3093 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3094
3095 /* Get the tuple cfg from pf */
3096 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3097 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3098 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3099 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3100 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3101 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3102 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3103 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3104 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3105 if (ret) {
3106 dev_err(&hdev->pdev->dev,
3107 "Configure rss input fail, status = %d\n", ret);
3108 return ret;
3109 }
3110
3111 return 0;
3112 }
3113
3114 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3115 u8 *key, u8 *hfunc)
3116 {
3117 struct hclge_vport *vport = hclge_get_vport(handle);
3118 int i;
3119
3120 /* Get hash algorithm */
3121 if (hfunc)
3122 *hfunc = vport->rss_algo;
3123
3124 /* Get the RSS Key required by the user */
3125 if (key)
3126 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3127
3128 /* Get indirect table */
3129 if (indir)
3130 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3131 indir[i] = vport->rss_indirection_tbl[i];
3132
3133 return 0;
3134 }
3135
3136 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3137 const u8 *key, const u8 hfunc)
3138 {
3139 struct hclge_vport *vport = hclge_get_vport(handle);
3140 struct hclge_dev *hdev = vport->back;
3141 u8 hash_algo;
3142 int ret, i;
3143
3144 /* Set the RSS Hash Key if specified by the user */
3145 if (key) {
3147 if (hfunc == ETH_RSS_HASH_TOP ||
3148 hfunc == ETH_RSS_HASH_NO_CHANGE)
3149 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3150 else
3151 return -EINVAL;
3152 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3153 if (ret)
3154 return ret;
3155
3156 /* Update the shadow RSS key with the user specified key */
3157 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3158 vport->rss_algo = hash_algo;
3159 }
3160
3161 /* Update the shadow RSS table with user specified qids */
3162 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3163 vport->rss_indirection_tbl[i] = indir[i];
3164
3165 /* Update the hardware */
3166 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3167 }
3168
3169 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3170 {
3171 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3172
3173 if (nfc->data & RXH_L4_B_2_3)
3174 hash_sets |= HCLGE_D_PORT_BIT;
3175 else
3176 hash_sets &= ~HCLGE_D_PORT_BIT;
3177
3178 if (nfc->data & RXH_IP_SRC)
3179 hash_sets |= HCLGE_S_IP_BIT;
3180 else
3181 hash_sets &= ~HCLGE_S_IP_BIT;
3182
3183 if (nfc->data & RXH_IP_DST)
3184 hash_sets |= HCLGE_D_IP_BIT;
3185 else
3186 hash_sets &= ~HCLGE_D_IP_BIT;
3187
3188 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3189 hash_sets |= HCLGE_V_TAG_BIT;
3190
3191 return hash_sets;
3192 }
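
/* Editorial note: example of the translation above. For a TCP over IPv4
 * flow with all four ethtool inputs requested,
 *	nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
 * yields
 *	HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 *	HCLGE_D_PORT_BIT,
 * and HCLGE_V_TAG_BIT would additionally be set only for SCTP flows.
 */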
3193
3194 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3195 struct ethtool_rxnfc *nfc)
3196 {
3197 struct hclge_vport *vport = hclge_get_vport(handle);
3198 struct hclge_dev *hdev = vport->back;
3199 struct hclge_rss_input_tuple_cmd *req;
3200 struct hclge_desc desc;
3201 u8 tuple_sets;
3202 int ret;
3203
3204 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3205 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3206 return -EINVAL;
3207
3208 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3209 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3210
3211 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3212 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3213 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3214 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3215 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3216 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3217 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3218 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3219
3220 tuple_sets = hclge_get_rss_hash_bits(nfc);
3221 switch (nfc->flow_type) {
3222 case TCP_V4_FLOW:
3223 req->ipv4_tcp_en = tuple_sets;
3224 break;
3225 case TCP_V6_FLOW:
3226 req->ipv6_tcp_en = tuple_sets;
3227 break;
3228 case UDP_V4_FLOW:
3229 req->ipv4_udp_en = tuple_sets;
3230 break;
3231 case UDP_V6_FLOW:
3232 req->ipv6_udp_en = tuple_sets;
3233 break;
3234 case SCTP_V4_FLOW:
3235 req->ipv4_sctp_en = tuple_sets;
3236 break;
3237 case SCTP_V6_FLOW:
3238 if ((nfc->data & RXH_L4_B_0_1) ||
3239 (nfc->data & RXH_L4_B_2_3))
3240 return -EINVAL;
3241
3242 req->ipv6_sctp_en = tuple_sets;
3243 break;
3244 case IPV4_FLOW:
3245 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3246 break;
3247 case IPV6_FLOW:
3248 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3249 break;
3250 default:
3251 return -EINVAL;
3252 }
3253
3254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3255 if (ret) {
3256 dev_err(&hdev->pdev->dev,
3257 "Set rss tuple fail, status = %d\n", ret);
3258 return ret;
3259 }
3260
3261 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3262 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3263 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3264 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3265 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3266 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3267 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3268 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3269 return 0;
3270 }
3271
3272 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3273 struct ethtool_rxnfc *nfc)
3274 {
3275 struct hclge_vport *vport = hclge_get_vport(handle);
3276 u8 tuple_sets;
3277
3278 nfc->data = 0;
3279
3280 switch (nfc->flow_type) {
3281 case TCP_V4_FLOW:
3282 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3283 break;
3284 case UDP_V4_FLOW:
3285 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3286 break;
3287 case TCP_V6_FLOW:
3288 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3289 break;
3290 case UDP_V6_FLOW:
3291 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3292 break;
3293 case SCTP_V4_FLOW:
3294 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3295 break;
3296 case SCTP_V6_FLOW:
3297 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3298 break;
3299 case IPV4_FLOW:
3300 case IPV6_FLOW:
3301 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3302 break;
3303 default:
3304 return -EINVAL;
3305 }
3306
3307 if (!tuple_sets)
3308 return 0;
3309
3310 if (tuple_sets & HCLGE_D_PORT_BIT)
3311 nfc->data |= RXH_L4_B_2_3;
3312 if (tuple_sets & HCLGE_S_PORT_BIT)
3313 nfc->data |= RXH_L4_B_0_1;
3314 if (tuple_sets & HCLGE_D_IP_BIT)
3315 nfc->data |= RXH_IP_DST;
3316 if (tuple_sets & HCLGE_S_IP_BIT)
3317 nfc->data |= RXH_IP_SRC;
3318
3319 return 0;
3320 }
3321
3322 static int hclge_get_tc_size(struct hnae3_handle *handle)
3323 {
3324 struct hclge_vport *vport = hclge_get_vport(handle);
3325 struct hclge_dev *hdev = vport->back;
3326
3327 return hdev->rss_size_max;
3328 }
3329
3330 int hclge_rss_init_hw(struct hclge_dev *hdev)
3331 {
3332 struct hclge_vport *vport = hdev->vport;
3333 u8 *rss_indir = vport[0].rss_indirection_tbl;
3334 u16 rss_size = vport[0].alloc_rss_size;
3335 u8 *key = vport[0].rss_hash_key;
3336 u8 hfunc = vport[0].rss_algo;
3337 u16 tc_offset[HCLGE_MAX_TC_NUM];
3338 u16 tc_valid[HCLGE_MAX_TC_NUM];
3339 u16 tc_size[HCLGE_MAX_TC_NUM];
3340 u16 roundup_size;
3341 int i, ret;
3342
3343 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3344 if (ret)
3345 return ret;
3346
3347 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3348 if (ret)
3349 return ret;
3350
3351 ret = hclge_set_rss_input_tuple(hdev);
3352 if (ret)
3353 return ret;
3354
3355 /* Each TC has the same queue size, and the tc_size set to hardware is
3356 * the log2 of the roundup power of two of rss_size; the actual queue
3357 * size is limited by the indirection table.
3358 */
3359 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3360 dev_err(&hdev->pdev->dev,
3361 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3362 rss_size);
3363 return -EINVAL;
3364 }
3365
3366 roundup_size = roundup_pow_of_two(rss_size);
3367 roundup_size = ilog2(roundup_size);
3368
3369 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3370 tc_valid[i] = 0;
3371
3372 if (!(hdev->hw_tc_map & BIT(i)))
3373 continue;
3374
3375 tc_valid[i] = 1;
3376 tc_size[i] = roundup_size;
3377 tc_offset[i] = rss_size * i;
3378 }
3379
3380 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3381 }
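
/* Editorial note: worked example of the tc_size encoding above. For
 * rss_size = 10, roundup_pow_of_two(10) = 16 and ilog2(16) = 4, so each
 * valid TC is programmed with tc_size = 4 (2^4 = 16 queue slots) and
 * tc_offset = 10 * i, while the indirection table still spreads traffic
 * over only the 10 real queues.
 */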
3382
3383 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3384 {
3385 struct hclge_vport *vport = hdev->vport;
3386 int i, j;
3387
3388 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3389 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3390 vport[j].rss_indirection_tbl[i] =
3391 i % vport[j].alloc_rss_size;
3392 }
3393 }
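
/* Editorial note: with alloc_rss_size = 4, the loop above fills each
 * vport's indirection table as 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * HCLGE_RSS_IND_TBL_SIZE entries, a uniform round-robin spread over the
 * vport's RSS queues.
 */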
3394
3395 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3396 {
3397 struct hclge_vport *vport = hdev->vport;
3398 int i;
3399
3400 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3401 vport[i].rss_tuple_sets.ipv4_tcp_en =
3402 HCLGE_RSS_INPUT_TUPLE_OTHER;
3403 vport[i].rss_tuple_sets.ipv4_udp_en =
3404 HCLGE_RSS_INPUT_TUPLE_OTHER;
3405 vport[i].rss_tuple_sets.ipv4_sctp_en =
3406 HCLGE_RSS_INPUT_TUPLE_SCTP;
3407 vport[i].rss_tuple_sets.ipv4_fragment_en =
3408 HCLGE_RSS_INPUT_TUPLE_OTHER;
3409 vport[i].rss_tuple_sets.ipv6_tcp_en =
3410 HCLGE_RSS_INPUT_TUPLE_OTHER;
3411 vport[i].rss_tuple_sets.ipv6_udp_en =
3412 HCLGE_RSS_INPUT_TUPLE_OTHER;
3413 vport[i].rss_tuple_sets.ipv6_sctp_en =
3414 HCLGE_RSS_INPUT_TUPLE_SCTP;
3415 vport[i].rss_tuple_sets.ipv6_fragment_en =
3416 HCLGE_RSS_INPUT_TUPLE_OTHER;
3417
3418 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3419
3420 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3421 }
3422
3423 hclge_rss_indir_init_cfg(hdev);
3424 }
3425
3426 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3427 int vector_id, bool en,
3428 struct hnae3_ring_chain_node *ring_chain)
3429 {
3430 struct hclge_dev *hdev = vport->back;
3431 struct hnae3_ring_chain_node *node;
3432 struct hclge_desc desc;
3433 struct hclge_ctrl_vector_chain_cmd *req
3434 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3435 enum hclge_cmd_status status;
3436 enum hclge_opcode_type op;
3437 u16 tqp_type_and_id;
3438 int i;
3439
3440 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3441 hclge_cmd_setup_basic_desc(&desc, op, false);
3442 req->int_vector_id = vector_id;
3443
3444 i = 0;
3445 for (node = ring_chain; node; node = node->next) {
3446 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3447 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3448 HCLGE_INT_TYPE_S,
3449 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3450 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3451 HCLGE_TQP_ID_S, node->tqp_index);
3452 hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3453 HCLGE_INT_GL_IDX_S,
3454 hnae_get_field(node->int_gl_idx,
3455 HNAE3_RING_GL_IDX_M,
3456 HNAE3_RING_GL_IDX_S));
3457 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3458 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3459 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3460 req->vfid = vport->vport_id;
3461
3462 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3463 if (status) {
3464 dev_err(&hdev->pdev->dev,
3465 "Map TQP fail, status is %d.\n",
3466 status);
3467 return -EIO;
3468 }
3469 i = 0;
3470
3471 hclge_cmd_setup_basic_desc(&desc,
3472 op,
3473 false);
3474 req->int_vector_id = vector_id;
3475 }
3476 }
3477
3478 if (i > 0) {
3479 req->int_cause_num = i;
3480 req->vfid = vport->vport_id;
3481 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3482 if (status) {
3483 dev_err(&hdev->pdev->dev,
3484 "Map TQP fail, status is %d.\n", status);
3485 return -EIO;
3486 }
3487 }
3488
3489 return 0;
3490 }
3491
3492 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3493 int vector,
3494 struct hnae3_ring_chain_node *ring_chain)
3495 {
3496 struct hclge_vport *vport = hclge_get_vport(handle);
3497 struct hclge_dev *hdev = vport->back;
3498 int vector_id;
3499
3500 vector_id = hclge_get_vector_index(hdev, vector);
3501 if (vector_id < 0) {
3502 dev_err(&hdev->pdev->dev,
3503 "Get vector index fail. vector_id =%d\n", vector_id);
3504 return vector_id;
3505 }
3506
3507 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3508 }
3509
3510 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3511 int vector,
3512 struct hnae3_ring_chain_node *ring_chain)
3513 {
3514 struct hclge_vport *vport = hclge_get_vport(handle);
3515 struct hclge_dev *hdev = vport->back;
3516 int vector_id, ret;
3517
3518 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3519 return 0;
3520
3521 vector_id = hclge_get_vector_index(hdev, vector);
3522 if (vector_id < 0) {
3523 dev_err(&handle->pdev->dev,
3524 "Get vector index fail. ret =%d\n", vector_id);
3525 return vector_id;
3526 }
3527
3528 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3529 if (ret)
3530 dev_err(&handle->pdev->dev,
3531 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3532 vector_id,
3533 ret);
3534
3535 return ret;
3536 }
3537
3538 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3539 struct hclge_promisc_param *param)
3540 {
3541 struct hclge_promisc_cfg_cmd *req;
3542 struct hclge_desc desc;
3543 int ret;
3544
3545 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3546
3547 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3548 req->vf_id = param->vf_id;
3549
3550 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
3551 * on pdev revision(0x20); newer revisions support them. Setting
3552 * these two fields does not cause an error when the driver sends
3553 * the command to firmware on revision(0x20).
3554 */
3555 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3556 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3557
3558 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3559 if (ret) {
3560 dev_err(&hdev->pdev->dev,
3561 "Set promisc mode fail, status is %d.\n", ret);
3562 return ret;
3563 }
3564 return 0;
3565 }
3566
3567 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3568 bool en_mc, bool en_bc, int vport_id)
3569 {
3570 if (!param)
3571 return;
3572
3573 memset(param, 0, sizeof(struct hclge_promisc_param));
3574 if (en_uc)
3575 param->enable = HCLGE_PROMISC_EN_UC;
3576 if (en_mc)
3577 param->enable |= HCLGE_PROMISC_EN_MC;
3578 if (en_bc)
3579 param->enable |= HCLGE_PROMISC_EN_BC;
3580 param->vf_id = vport_id;
3581 }
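
/* Editorial sketch (illustrative only): typical pairing of the two
 * helpers above, as hclge_set_promisc_mode() does below - build the
 * param, then issue the command. Broadcast-only promisc for a vport:
 */
static int example_enable_bc_promisc(struct hclge_dev *hdev, int vport_id)
{
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, false, false, true, vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}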
3582
3583 static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3584 bool en_mc_pmc)
3585 {
3586 struct hclge_vport *vport = hclge_get_vport(handle);
3587 struct hclge_dev *hdev = vport->back;
3588 struct hclge_promisc_param param;
3589
3590 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3591 vport->vport_id);
3592 hclge_cmd_set_promisc_mode(hdev, &param);
3593 }
3594
3595 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3596 {
3597 struct hclge_desc desc;
3598 struct hclge_config_mac_mode_cmd *req =
3599 (struct hclge_config_mac_mode_cmd *)desc.data;
3600 u32 loop_en = 0;
3601 int ret;
3602
3603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3604 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3605 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3606 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3607 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3608 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3609 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3610 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3611 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3612 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3613 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3614 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3615 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3616 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3617 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3618 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3619
3620 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3621 if (ret)
3622 dev_err(&hdev->pdev->dev,
3623 "mac enable fail, ret =%d.\n", ret);
3624 }
3625
3626 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
3627 {
3628 struct hclge_config_mac_mode_cmd *req;
3629 struct hclge_desc desc;
3630 u32 loop_en;
3631 int ret;
3632
3633 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3634 /* 1 Read out the MAC mode config at first */
3635 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
3636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3637 if (ret) {
3638 dev_err(&hdev->pdev->dev,
3639 "mac loopback get fail, ret =%d.\n", ret);
3640 return ret;
3641 }
3642
3643 /* 2 Then setup the loopback flag */
3644 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3645 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
3646
3647 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3648
3649 /* 3 Config mac work mode with the loopback flag
3650 * and its original configuration parameters
3651 */
3652 hclge_cmd_reuse_desc(&desc, false);
3653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3654 if (ret)
3655 dev_err(&hdev->pdev->dev,
3656 "mac loopback set fail, ret =%d.\n", ret);
3657 return ret;
3658 }
3659
3660 static int hclge_set_loopback(struct hnae3_handle *handle,
3661 enum hnae3_loop loop_mode, bool en)
3662 {
3663 struct hclge_vport *vport = hclge_get_vport(handle);
3664 struct hclge_dev *hdev = vport->back;
3665 int ret;
3666
3667 switch (loop_mode) {
3668 case HNAE3_MAC_INTER_LOOP_MAC:
3669 ret = hclge_set_mac_loopback(hdev, en);
3670 break;
3671 default:
3672 ret = -ENOTSUPP;
3673 dev_err(&hdev->pdev->dev,
3674 "loop_mode %d is not supported\n", loop_mode);
3675 break;
3676 }
3677
3678 return ret;
3679 }
3680
3681 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3682 int stream_id, bool enable)
3683 {
3684 struct hclge_desc desc;
3685 struct hclge_cfg_com_tqp_queue_cmd *req =
3686 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
3687 int ret;
3688
3689 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3690 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3691 req->stream_id = cpu_to_le16(stream_id);
3692 req->enable |= enable << HCLGE_TQP_ENABLE_B;
3693
3694 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3695 if (ret)
3696 dev_err(&hdev->pdev->dev,
3697 "Tqp enable fail, status =%d.\n", ret);
3698 return ret;
3699 }
3700
3701 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3702 {
3703 struct hclge_vport *vport = hclge_get_vport(handle);
3704 struct hnae3_queue *queue;
3705 struct hclge_tqp *tqp;
3706 int i;
3707
3708 for (i = 0; i < vport->alloc_tqps; i++) {
3709 queue = handle->kinfo.tqp[i];
3710 tqp = container_of(queue, struct hclge_tqp, q);
3711 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3712 }
3713 }
3714
3715 static int hclge_ae_start(struct hnae3_handle *handle)
3716 {
3717 struct hclge_vport *vport = hclge_get_vport(handle);
3718 struct hclge_dev *hdev = vport->back;
3719 int i, ret;
3720
3721 for (i = 0; i < vport->alloc_tqps; i++)
3722 hclge_tqp_enable(hdev, i, 0, true);
3723
3724 /* mac enable */
3725 hclge_cfg_mac_mode(hdev, true);
3726 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3727 mod_timer(&hdev->service_timer, jiffies + HZ);
3728 hdev->hw.mac.link = 0;
3729
3730 /* reset tqp stats */
3731 hclge_reset_tqp_stats(handle);
3732
3733 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3734 return 0;
3735
3736 ret = hclge_mac_start_phy(hdev);
3737 if (ret)
3738 return ret;
3739
3740 return 0;
3741 }
3742
3743 static void hclge_ae_stop(struct hnae3_handle *handle)
3744 {
3745 struct hclge_vport *vport = hclge_get_vport(handle);
3746 struct hclge_dev *hdev = vport->back;
3747 int i;
3748
3749 del_timer_sync(&hdev->service_timer);
3750 cancel_work_sync(&hdev->service_task);
3751
3752 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3753 return;
3754
3755 for (i = 0; i < vport->alloc_tqps; i++)
3756 hclge_tqp_enable(hdev, i, 0, false);
3757
3758 /* Mac disable */
3759 hclge_cfg_mac_mode(hdev, false);
3760
3761 hclge_mac_stop_phy(hdev);
3762
3763 /* reset tqp stats */
3764 hclge_reset_tqp_stats(handle);
3765 del_timer_sync(&hdev->service_timer);
3766 cancel_work_sync(&hdev->service_task);
3767 hclge_update_link_status(hdev);
3768 }
3769
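/* hclge_get_mac_vlan_cmd_status - translate a MAC_VLAN table command
 * response into an errno. A non-zero cmdq_resp means the command itself
 * failed (-EIO); otherwise resp_code is decoded per opcode: for ADD,
 * codes 2/3 mean unicast/multicast table overflow (-ENOSPC); for REMOVE
 * and LOOKUP, code 1 means the entry was not found (-ENOENT).
 */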
3770 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3771 u16 cmdq_resp, u8 resp_code,
3772 enum hclge_mac_vlan_tbl_opcode op)
3773 {
3774 struct hclge_dev *hdev = vport->back;
3775 int return_status = -EIO;
3776
3777 if (cmdq_resp) {
3778 dev_err(&hdev->pdev->dev,
3779 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3780 cmdq_resp);
3781 return -EIO;
3782 }
3783
3784 if (op == HCLGE_MAC_VLAN_ADD) {
3785 if ((!resp_code) || (resp_code == 1)) {
3786 return_status = 0;
3787 } else if (resp_code == 2) {
3788 return_status = -ENOSPC;
3789 dev_err(&hdev->pdev->dev,
3790 "add mac addr failed for uc_overflow.\n");
3791 } else if (resp_code == 3) {
3792 return_status = -ENOSPC;
3793 dev_err(&hdev->pdev->dev,
3794 "add mac addr failed for mc_overflow.\n");
3795 } else {
3796 dev_err(&hdev->pdev->dev,
3797 "add mac addr failed for undefined, code=%d.\n",
3798 resp_code);
3799 }
3800 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3801 if (!resp_code) {
3802 return_status = 0;
3803 } else if (resp_code == 1) {
3804 return_status = -ENOENT;
3805 dev_dbg(&hdev->pdev->dev,
3806 "remove mac addr failed for miss.\n");
3807 } else {
3808 dev_err(&hdev->pdev->dev,
3809 "remove mac addr failed for undefined, code=%d.\n",
3810 resp_code);
3811 }
3812 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3813 if (!resp_code) {
3814 return_status = 0;
3815 } else if (resp_code == 1) {
3816 return_status = -ENOENT;
3817 dev_dbg(&hdev->pdev->dev,
3818 "lookup mac addr failed for miss.\n");
3819 } else {
3820 dev_err(&hdev->pdev->dev,
3821 "lookup mac addr failed for undefined, code=%d.\n",
3822 resp_code);
3823 }
3824 } else {
3825 return_status = -EINVAL;
3826 dev_err(&hdev->pdev->dev,
3827 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3828 op);
3829 }
3830
3831 return return_status;
3832 }
3833
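/* hclge_update_desc_vfid - set or clear one vfid bit in the VF bitmap of
 * a multi-descriptor MAC_VLAN entry: vfids 0-191 occupy the six 32-bit
 * data words of desc[1], vfids 192-255 continue in desc[2].
 */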
3834 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3835 {
3836 int word_num;
3837 int bit_num;
3838
3839 if (vfid > 255 || vfid < 0)
3840 return -EIO;
3841
3842 if (vfid <= 191) {
3843 word_num = vfid / 32;
3844 bit_num = vfid % 32;
3845 if (clr)
3846 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3847 else
3848 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
3849 } else {
3850 word_num = (vfid - 192) / 32;
3851 bit_num = vfid % 32;
3852 if (clr)
3853 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3854 else
3855 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
3856 }
3857
3858 return 0;
3859 }
3860
3861 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3862 {
3863 #define HCLGE_DESC_NUMBER 3
3864 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3865 int i, j;
3866
3867 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3868 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3869 if (desc[i].data[j])
3870 return false;
3871
3872 return true;
3873 }
3874
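/* hclge_prepare_mac_addr - pack a 6-byte MAC address into the table
 * entry: bytes 0-3 into the little-endian 32-bit high field, bytes 4-5
 * into the little-endian 16-bit low field.
 */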
3875 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3876 const u8 *addr)
3877 {
3878 const unsigned char *mac_addr = addr;
3879 u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
3880 (mac_addr[2] << 16) | (mac_addr[3] << 24);
3881 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3882
3883 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3884 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3885 }
3886
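/* hclge_get_mac_addr_to_mta_index - derive a 12-bit MTA table index from
 * the top 16 bits of the MAC address; the right-shift depends on the
 * configured mta_mac_sel_type, which selects the bit window used as the
 * index.
 */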
3887 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3888 const u8 *addr)
3889 {
3890 u16 high_val = addr[1] | (addr[0] << 8);
3891 struct hclge_dev *hdev = vport->back;
3892 u32 rsh = 4 - hdev->mta_mac_sel_type;
3893 u16 ret_val = (high_val >> rsh) & 0xfff;
3894
3895 return ret_val;
3896 }
3897
3898 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3899 enum hclge_mta_dmac_sel_type mta_mac_sel,
3900 bool enable)
3901 {
3902 struct hclge_mta_filter_mode_cmd *req;
3903 struct hclge_desc desc;
3904 int ret;
3905
3906 req = (struct hclge_mta_filter_mode_cmd *)desc.data;
3907 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3908
3909 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3910 enable);
3911 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3912 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3913
3914 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3915 if (ret) {
3916 dev_err(&hdev->pdev->dev,
3917 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3918 ret);
3919 return ret;
3920 }
3921
3922 return 0;
3923 }
3924
3925 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3926 u8 func_id,
3927 bool enable)
3928 {
3929 struct hclge_cfg_func_mta_filter_cmd *req;
3930 struct hclge_desc desc;
3931 int ret;
3932
3933 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
3934 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3935
3936 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3937 enable);
3938 req->function_id = func_id;
3939
3940 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3941 if (ret) {
3942 dev_err(&hdev->pdev->dev,
3943 "Config func_id enable failed for cmd_send, ret =%d.\n",
3944 ret);
3945 return ret;
3946 }
3947
3948 return 0;
3949 }
3950
3951 static int hclge_set_mta_table_item(struct hclge_vport *vport,
3952 u16 idx,
3953 bool enable)
3954 {
3955 struct hclge_dev *hdev = vport->back;
3956 struct hclge_cfg_func_mta_item_cmd *req;
3957 struct hclge_desc desc;
3958 u16 item_idx = 0;
3959 int ret;
3960
3961 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
3962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3963 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3964
3965 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3966 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3967 req->item_idx = cpu_to_le16(item_idx);
3968
3969 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3970 if (ret) {
3971 dev_err(&hdev->pdev->dev,
3972 "Config mta table item failed for cmd_send, ret =%d.\n",
3973 ret);
3974 return ret;
3975 }
3976
3977 return 0;
3978 }
3979
3980 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3981 struct hclge_mac_vlan_tbl_entry_cmd *req)
3982 {
3983 struct hclge_dev *hdev = vport->back;
3984 struct hclge_desc desc;
3985 u8 resp_code;
3986 u16 retval;
3987 int ret;
3988
3989 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3990
3991 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3992
3993 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3994 if (ret) {
3995 dev_err(&hdev->pdev->dev,
3996 "del mac addr failed for cmd_send, ret =%d.\n",
3997 ret);
3998 return ret;
3999 }
4000 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4001 retval = le16_to_cpu(desc.retval);
4002
4003 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
4004 HCLGE_MAC_VLAN_REMOVE);
4005 }
4006
4007 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
4008 struct hclge_mac_vlan_tbl_entry_cmd *req,
4009 struct hclge_desc *desc,
4010 bool is_mc)
4011 {
4012 struct hclge_dev *hdev = vport->back;
4013 u8 resp_code;
4014 u16 retval;
4015 int ret;
4016
4017 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
4018 if (is_mc) {
4019 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4020 memcpy(desc[0].data,
4021 req,
4022 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4023 hclge_cmd_setup_basic_desc(&desc[1],
4024 HCLGE_OPC_MAC_VLAN_ADD,
4025 true);
4026 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4027 hclge_cmd_setup_basic_desc(&desc[2],
4028 HCLGE_OPC_MAC_VLAN_ADD,
4029 true);
4030 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4031 } else {
4032 memcpy(desc[0].data,
4033 req,
4034 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4035 ret = hclge_cmd_send(&hdev->hw, desc, 1);
4036 }
4037 if (ret) {
4038 dev_err(&hdev->pdev->dev,
4039 "lookup mac addr failed for cmd_send, ret =%d.\n",
4040 ret);
4041 return ret;
4042 }
4043 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
4044 retval = le16_to_cpu(desc[0].retval);
4045
4046 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
4047 HCLGE_MAC_VLAN_LKUP);
4048 }
4049
4050 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
4051 struct hclge_mac_vlan_tbl_entry_cmd *req,
4052 struct hclge_desc *mc_desc)
4053 {
4054 struct hclge_dev *hdev = vport->back;
4055 int cfg_status;
4056 u8 resp_code;
4057 u16 retval;
4058 int ret;
4059
4060 if (!mc_desc) {
4061 struct hclge_desc desc;
4062
4063 hclge_cmd_setup_basic_desc(&desc,
4064 HCLGE_OPC_MAC_VLAN_ADD,
4065 false);
4066 memcpy(desc.data, req,
4067 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4068 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4069 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4070 retval = le16_to_cpu(desc.retval);
4071
4072 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
4073 resp_code,
4074 HCLGE_MAC_VLAN_ADD);
4075 } else {
4076 hclge_cmd_reuse_desc(&mc_desc[0], false);
4077 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4078 hclge_cmd_reuse_desc(&mc_desc[1], false);
4079 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4080 hclge_cmd_reuse_desc(&mc_desc[2], false);
4081 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
4082 memcpy(mc_desc[0].data, req,
4083 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4084 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
4085 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
4086 retval = le16_to_cpu(mc_desc[0].retval);
4087
4088 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
4089 resp_code,
4090 HCLGE_MAC_VLAN_ADD);
4091 }
4092
4093 if (ret) {
4094 dev_err(&hdev->pdev->dev,
4095 "add mac addr failed for cmd_send, ret =%d.\n",
4096 ret);
4097 return ret;
4098 }
4099
4100 return cfg_status;
4101 }
4102
4103 static int hclge_add_uc_addr(struct hnae3_handle *handle,
4104 const unsigned char *addr)
4105 {
4106 struct hclge_vport *vport = hclge_get_vport(handle);
4107
4108 return hclge_add_uc_addr_common(vport, addr);
4109 }
4110
4111 int hclge_add_uc_addr_common(struct hclge_vport *vport,
4112 const unsigned char *addr)
4113 {
4114 struct hclge_dev *hdev = vport->back;
4115 struct hclge_mac_vlan_tbl_entry_cmd req;
4116 struct hclge_desc desc;
4117 u16 egress_port = 0;
4118 int ret;
4119
4120 /* mac addr check */
4121 if (is_zero_ether_addr(addr) ||
4122 is_broadcast_ether_addr(addr) ||
4123 is_multicast_ether_addr(addr)) {
4124 dev_err(&hdev->pdev->dev,
4125 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
4126 addr,
4127 is_zero_ether_addr(addr),
4128 is_broadcast_ether_addr(addr),
4129 is_multicast_ether_addr(addr));
4130 return -EINVAL;
4131 }
4132
4133 memset(&req, 0, sizeof(req));
4134 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4135 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4136 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
4137 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4138
4139 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
4140 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
4141 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
4142 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
4143 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
4144 HCLGE_MAC_EPORT_PFID_S, 0);
4145
4146 req.egress_port = cpu_to_le16(egress_port);
4147
4148 hclge_prepare_mac_addr(&req, addr);
4149
4150 /* Lookup the mac address in the mac_vlan table, and add
4151 * it if the entry does not exist. Duplicate unicast entries
4152 * are not allowed in the mac_vlan table.
4153 */
4154 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
4155 if (ret == -ENOENT)
4156 return hclge_add_mac_vlan_tbl(vport, &req, NULL);
4157
4158 /* check if we just hit the duplicate */
4159 if (!ret)
4160 ret = -EINVAL;
4161
4162 dev_err(&hdev->pdev->dev,
4163 "PF failed to add unicast entry(%pM) in the MAC table\n",
4164 addr);
4165
4166 return ret;
4167 }
4168
4169 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
4170 const unsigned char *addr)
4171 {
4172 struct hclge_vport *vport = hclge_get_vport(handle);
4173
4174 return hclge_rm_uc_addr_common(vport, addr);
4175 }
4176
4177 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
4178 const unsigned char *addr)
4179 {
4180 struct hclge_dev *hdev = vport->back;
4181 struct hclge_mac_vlan_tbl_entry_cmd req;
4182 int ret;
4183
4184 /* mac addr check */
4185 if (is_zero_ether_addr(addr) ||
4186 is_broadcast_ether_addr(addr) ||
4187 is_multicast_ether_addr(addr)) {
4188 dev_dbg(&hdev->pdev->dev,
4189 "Remove mac err! invalid mac:%pM.\n",
4190 addr);
4191 return -EINVAL;
4192 }
4193
4194 memset(&req, 0, sizeof(req));
4195 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4196 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4197 hclge_prepare_mac_addr(&req, addr);
4198 ret = hclge_remove_mac_vlan_tbl(vport, &req);
4199
4200 return ret;
4201 }
4202
4203 static int hclge_add_mc_addr(struct hnae3_handle *handle,
4204 const unsigned char *addr)
4205 {
4206 struct hclge_vport *vport = hclge_get_vport(handle);
4207
4208 return hclge_add_mc_addr_common(vport, addr);
4209 }
4210
4211 int hclge_add_mc_addr_common(struct hclge_vport *vport,
4212 const unsigned char *addr)
4213 {
4214 struct hclge_dev *hdev = vport->back;
4215 struct hclge_mac_vlan_tbl_entry_cmd req;
4216 struct hclge_desc desc[3];
4217 u16 tbl_idx;
4218 int status;
4219
4220 /* mac addr check */
4221 if (!is_multicast_ether_addr(addr)) {
4222 dev_err(&hdev->pdev->dev,
4223 "Add mc mac err! invalid mac:%pM.\n",
4224 addr);
4225 return -EINVAL;
4226 }
4227 memset(&req, 0, sizeof(req));
4228 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4229 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4230 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4231 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4232 hclge_prepare_mac_addr(&req, addr);
4233 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4234 if (!status) {
4235 /* This mac addr exists, update VFID for it */
4236 hclge_update_desc_vfid(desc, vport->vport_id, false);
4237 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4238 } else {
4239 /* This mac addr does not exist, add a new entry for it */
4240 memset(desc[0].data, 0, sizeof(desc[0].data));
4241 memset(desc[1].data, 0, sizeof(desc[1].data));
4242 memset(desc[2].data, 0, sizeof(desc[2].data));
4243 hclge_update_desc_vfid(desc, vport->vport_id, false);
4244 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4245 }
4246
4247 /* Set MTA table for this MAC address */
4248 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4249 status = hclge_set_mta_table_item(vport, tbl_idx, true);
4250
4251 return status;
4252 }
4253
4254 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
4255 const unsigned char *addr)
4256 {
4257 struct hclge_vport *vport = hclge_get_vport(handle);
4258
4259 return hclge_rm_mc_addr_common(vport, addr);
4260 }
4261
4262 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
4263 const unsigned char *addr)
4264 {
4265 struct hclge_dev *hdev = vport->back;
4266 struct hclge_mac_vlan_tbl_entry_cmd req;
4267 enum hclge_cmd_status status;
4268 struct hclge_desc desc[3];
4269 u16 tbl_idx;
4270
4271 /* mac addr check */
4272 if (!is_multicast_ether_addr(addr)) {
4273 dev_dbg(&hdev->pdev->dev,
4274 "Remove mc mac err! invalid mac:%pM.\n",
4275 addr);
4276 return -EINVAL;
4277 }
4278
4279 memset(&req, 0, sizeof(req));
4280 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4281 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4282 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4283 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4284 hclge_prepare_mac_addr(&req, addr);
4285 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4286 if (!status) {
4287 /* This mac addr exists, remove this handle's VFID for it */
4288 hclge_update_desc_vfid(desc, vport->vport_id, true);
4289
4290 if (hclge_is_all_function_id_zero(desc))
4291 /* All vfids are zero, so delete this entry */
4292 status = hclge_remove_mac_vlan_tbl(vport, &req);
4293 else
4294 /* Not all vfids are zero, so just update the entry */
4295 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4296
4297 } else {
4298 /* This mac addr does not exist, so it cannot be deleted */
4299 dev_err(&hdev->pdev->dev,
4300 "Rm multicast mac addr failed, ret = %d.\n",
4301 status);
4302 return -EIO;
4303 }
4304
4305 /* Set MTA table for this MAC address */
4306 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4307 status = hclge_set_mta_table_item(vport, tbl_idx, false);
4308
4309 return status;
4310 }
4311
4312 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
4313 u16 cmdq_resp, u8 resp_code)
4314 {
4315 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
4316 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
4317 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
4318 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
4319
4320 int return_status;
4321
4322 if (cmdq_resp) {
4323 dev_err(&hdev->pdev->dev,
4324 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
4325 cmdq_resp);
4326 return -EIO;
4327 }
4328
4329 switch (resp_code) {
4330 case HCLGE_ETHERTYPE_SUCCESS_ADD:
4331 case HCLGE_ETHERTYPE_ALREADY_ADD:
4332 return_status = 0;
4333 break;
4334 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
4335 dev_err(&hdev->pdev->dev,
4336 "add mac ethertype failed for manager table overflow.\n");
4337 return_status = -EIO;
4338 break;
4339 case HCLGE_ETHERTYPE_KEY_CONFLICT:
4340 dev_err(&hdev->pdev->dev,
4341 "add mac ethertype failed for key conflict.\n");
4342 return_status = -EIO;
4343 break;
4344 default:
4345 dev_err(&hdev->pdev->dev,
4346 "add mac ethertype failed for undefined, code=%d.\n",
4347 resp_code);
4348 return_status = -EIO;
4349 }
4350
4351 return return_status;
4352 }
4353
4354 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
4355 const struct hclge_mac_mgr_tbl_entry_cmd *req)
4356 {
4357 struct hclge_desc desc;
4358 u8 resp_code;
4359 u16 retval;
4360 int ret;
4361
4362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
4363 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
4364
4365 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4366 if (ret) {
4367 dev_err(&hdev->pdev->dev,
4368 "add mac ethertype failed for cmd_send, ret =%d.\n",
4369 ret);
4370 return ret;
4371 }
4372
4373 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4374 retval = le16_to_cpu(desc.retval);
4375
4376 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
4377 }
4378
4379 static int init_mgr_tbl(struct hclge_dev *hdev)
4380 {
4381 int ret;
4382 int i;
4383
4384 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
4385 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
4386 if (ret) {
4387 dev_err(&hdev->pdev->dev,
4388 "add mac ethertype failed, ret =%d.\n",
4389 ret);
4390 return ret;
4391 }
4392 }
4393
4394 return 0;
4395 }
4396
4397 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4398 {
4399 struct hclge_vport *vport = hclge_get_vport(handle);
4400 struct hclge_dev *hdev = vport->back;
4401
4402 ether_addr_copy(p, hdev->hw.mac.mac_addr);
4403 }
4404
4405 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
4406 bool is_first)
4407 {
4408 const unsigned char *new_addr = (const unsigned char *)p;
4409 struct hclge_vport *vport = hclge_get_vport(handle);
4410 struct hclge_dev *hdev = vport->back;
4411 int ret;
4412
4413 /* mac addr check */
4414 if (is_zero_ether_addr(new_addr) ||
4415 is_broadcast_ether_addr(new_addr) ||
4416 is_multicast_ether_addr(new_addr)) {
4417 dev_err(&hdev->pdev->dev,
4418 "Change uc mac err! invalid mac:%p.\n",
4419 new_addr);
4420 return -EINVAL;
4421 }
4422
4423 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
4424 dev_warn(&hdev->pdev->dev,
4425 "remove old uc mac address fail.\n");
4426
4427 ret = hclge_add_uc_addr(handle, new_addr);
4428 if (ret) {
4429 dev_err(&hdev->pdev->dev,
4430 "add uc mac address fail, ret =%d.\n",
4431 ret);
4432
4433 if (!is_first &&
4434 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
4435 dev_err(&hdev->pdev->dev,
4436 "restore uc mac address fail.\n");
4437
4438 return -EIO;
4439 }
4440
4441 ret = hclge_pause_addr_cfg(hdev, new_addr);
4442 if (ret) {
4443 dev_err(&hdev->pdev->dev,
4444 "configure mac pause address fail, ret =%d.\n",
4445 ret);
4446 return -EIO;
4447 }
4448
4449 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4450
4451 return 0;
4452 }
4453
4454 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4455 bool filter_en)
4456 {
4457 struct hclge_vlan_filter_ctrl_cmd *req;
4458 struct hclge_desc desc;
4459 int ret;
4460
4461 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4462
4463 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
4464 req->vlan_type = vlan_type;
4465 req->vlan_fe = filter_en;
4466
4467 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4468 if (ret) {
4469 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
4470 ret);
4471 return ret;
4472 }
4473
4474 return 0;
4475 }
4476
4477 #define HCLGE_FILTER_TYPE_VF 0
4478 #define HCLGE_FILTER_TYPE_PORT 1
4479
4480 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
4481 {
4482 struct hclge_vport *vport = hclge_get_vport(handle);
4483 struct hclge_dev *hdev = vport->back;
4484
4485 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
4486 }
4487
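/* hclge_set_vf_vlan_common - add (is_kill == false) or remove a VLAN
 * filter entry for one VF. The per-VF bitmap spans two descriptors of
 * HCLGE_MAX_VF_BYTES each, so vfid selects a byte and bit in either
 * desc[0] or desc[1].
 */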
4488 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4489 bool is_kill, u16 vlan, u8 qos,
4490 __be16 proto)
4491 {
4492 #define HCLGE_MAX_VF_BYTES 16
4493 struct hclge_vlan_filter_vf_cfg_cmd *req0;
4494 struct hclge_vlan_filter_vf_cfg_cmd *req1;
4495 struct hclge_desc desc[2];
4496 u8 vf_byte_val;
4497 u8 vf_byte_off;
4498 int ret;
4499
4500 hclge_cmd_setup_basic_desc(&desc[0],
4501 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4502 hclge_cmd_setup_basic_desc(&desc[1],
4503 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4504
4505 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4506
4507 vf_byte_off = vfid / 8;
4508 vf_byte_val = 1 << (vfid % 8);
4509
4510 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
4511 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
4512
4513 req0->vlan_id = cpu_to_le16(vlan);
4514 req0->vlan_cfg = is_kill;
4515
4516 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
4517 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
4518 else
4519 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
4520
4521 ret = hclge_cmd_send(&hdev->hw, desc, 2);
4522 if (ret) {
4523 dev_err(&hdev->pdev->dev,
4524 "Send vf vlan command fail, ret =%d.\n",
4525 ret);
4526 return ret;
4527 }
4528
4529 if (!is_kill) {
4530 #define HCLGE_VF_VLAN_NO_ENTRY 2
4531 if (!req0->resp_code || req0->resp_code == 1)
4532 return 0;
4533
4534 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
4535 dev_warn(&hdev->pdev->dev,
4536 "vf vlan table is full, vf vlan filter is disabled\n");
4537 return 0;
4538 }
4539
4540 dev_err(&hdev->pdev->dev,
4541 "Add vf vlan filter fail, ret =%d.\n",
4542 req0->resp_code);
4543 } else {
4544 if (!req0->resp_code)
4545 return 0;
4546
4547 dev_err(&hdev->pdev->dev,
4548 "Kill vf vlan filter fail, ret =%d.\n",
4549 req0->resp_code);
4550 }
4551
4552 return -EIO;
4553 }
4554
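/* hclge_set_port_vlan_filter - program the port-level VLAN filter. The
 * 4096 VLAN ids are split into pages of 160; vlan_offset selects the
 * page and a single bit in the page bitmap selects the id within it.
 */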
4555 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
4556 u16 vlan_id, bool is_kill)
4557 {
4558 struct hclge_vlan_filter_pf_cfg_cmd *req;
4559 struct hclge_desc desc;
4560 u8 vlan_offset_byte_val;
4561 u8 vlan_offset_byte;
4562 u8 vlan_offset_160;
4563 int ret;
4564
4565 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
4566
4567 vlan_offset_160 = vlan_id / 160;
4568 vlan_offset_byte = (vlan_id % 160) / 8;
4569 vlan_offset_byte_val = 1 << (vlan_id % 8);
4570
4571 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
4572 req->vlan_offset = vlan_offset_160;
4573 req->vlan_cfg = is_kill;
4574 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4575
4576 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4577 if (ret)
4578 dev_err(&hdev->pdev->dev,
4579 "port vlan command, send fail, ret =%d.\n", ret);
4580 return ret;
4581 }
4582
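/* hclge_set_vlan_filter_hw - program the VF-level filter and keep the
 * software vlan_table bitmap in sync; the port-level filter is only
 * touched when the first vport joins or the last vport leaves a VLAN.
 */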
4583 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
4584 u16 vport_id, u16 vlan_id, u8 qos,
4585 bool is_kill)
4586 {
4587 u16 vport_idx, vport_num = 0;
4588 int ret;
4589
4590 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
4591 0, proto);
4592 if (ret) {
4593 dev_err(&hdev->pdev->dev,
4594 "Set %d vport vlan filter config fail, ret =%d.\n",
4595 vport_id, ret);
4596 return ret;
4597 }
4598
4599 /* vlan 0 may be added twice when 8021q module is enabled */
4600 if (!is_kill && !vlan_id &&
4601 test_bit(vport_id, hdev->vlan_table[vlan_id]))
4602 return 0;
4603
4604 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
4605 dev_err(&hdev->pdev->dev,
4606 "Add port vlan failed, vport %d is already in vlan %d\n",
4607 vport_id, vlan_id);
4608 return -EINVAL;
4609 }
4610
4611 if (is_kill &&
4612 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
4613 dev_err(&hdev->pdev->dev,
4614 "Delete port vlan failed, vport %d is not in vlan %d\n",
4615 vport_id, vlan_id);
4616 return -EINVAL;
4617 }
4618
4619 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
4620 vport_num++;
4621
4622 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
4623 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
4624 is_kill);
4625
4626 return ret;
4627 }
4628
4629 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
4630 u16 vlan_id, bool is_kill)
4631 {
4632 struct hclge_vport *vport = hclge_get_vport(handle);
4633 struct hclge_dev *hdev = vport->back;
4634
4635 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
4636 0, is_kill);
4637 }
4638
4639 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4640 u16 vlan, u8 qos, __be16 proto)
4641 {
4642 struct hclge_vport *vport = hclge_get_vport(handle);
4643 struct hclge_dev *hdev = vport->back;
4644
4645 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4646 return -EINVAL;
4647 if (proto != htons(ETH_P_8021Q))
4648 return -EPROTONOSUPPORT;
4649
4650 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
4651 }
4652
4653 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
4654 {
4655 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
4656 struct hclge_vport_vtag_tx_cfg_cmd *req;
4657 struct hclge_dev *hdev = vport->back;
4658 struct hclge_desc desc;
4659 int status;
4660
4661 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
4662
4663 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
4664 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
4665 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
4666 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
4667 vcfg->accept_tag1 ? 1 : 0);
4668 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
4669 vcfg->accept_untag1 ? 1 : 0);
4670 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
4671 vcfg->accept_tag2 ? 1 : 0);
4672 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
4673 vcfg->accept_untag2 ? 1 : 0);
4674 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
4675 vcfg->insert_tag1_en ? 1 : 0);
4676 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
4677 vcfg->insert_tag2_en ? 1 : 0);
4678 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
4679
4680 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4681 req->vf_bitmap[req->vf_offset] =
4682 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4683
4684 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4685 if (status)
4686 dev_err(&hdev->pdev->dev,
4687 "Send port txvlan cfg command fail, ret =%d\n",
4688 status);
4689
4690 return status;
4691 }
4692
4693 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
4694 {
4695 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
4696 struct hclge_vport_vtag_rx_cfg_cmd *req;
4697 struct hclge_dev *hdev = vport->back;
4698 struct hclge_desc desc;
4699 int status;
4700
4701 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
4702
4703 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
4704 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
4705 vcfg->strip_tag1_en ? 1 : 0);
4706 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
4707 vcfg->strip_tag2_en ? 1 : 0);
4708 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
4709 vcfg->vlan1_vlan_prionly ? 1 : 0);
4710 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
4711 vcfg->vlan2_vlan_prionly ? 1 : 0);
4712
4713 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4714 req->vf_bitmap[req->vf_offset] =
4715 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4716
4717 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4718 if (status)
4719 dev_err(&hdev->pdev->dev,
4720 "Send port rxvlan cfg command fail, ret =%d\n",
4721 status);
4722
4723 return status;
4724 }
4725
4726 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
4727 {
4728 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
4729 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
4730 struct hclge_desc desc;
4731 int status;
4732
4733 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
4734 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
4735 rx_req->ot_fst_vlan_type =
4736 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
4737 rx_req->ot_sec_vlan_type =
4738 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
4739 rx_req->in_fst_vlan_type =
4740 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
4741 rx_req->in_sec_vlan_type =
4742 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
4743
4744 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4745 if (status) {
4746 dev_err(&hdev->pdev->dev,
4747 "Send rxvlan protocol type command fail, ret =%d\n",
4748 status);
4749 return status;
4750 }
4751
4752 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
4753
4754 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
4755 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
4756 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
4757
4758 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4759 if (status)
4760 dev_err(&hdev->pdev->dev,
4761 "Send txvlan protocol type command fail, ret =%d\n",
4762 status);
4763
4764 return status;
4765 }
4766
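/* hclge_init_vlan_config - enable VF- and port-level VLAN filtering,
 * program the default 0x8100 TPIDs for rx/tx VLAN handling, apply the
 * per-vport tx/rx VLAN offload defaults, and finally add VLAN 0 for
 * the PF.
 */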
4767 static int hclge_init_vlan_config(struct hclge_dev *hdev)
4768 {
4769 #define HCLGE_DEF_VLAN_TYPE 0x8100
4770
4771 struct hnae3_handle *handle;
4772 struct hclge_vport *vport;
4773 int ret;
4774 int i;
4775
4776 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
4777 if (ret)
4778 return ret;
4779
4780 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
4781 if (ret)
4782 return ret;
4783
4784 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4785 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4786 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4787 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4788 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
4789 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
4790
4791 ret = hclge_set_vlan_protocol_type(hdev);
4792 if (ret)
4793 return ret;
4794
4795 for (i = 0; i < hdev->num_alloc_vport; i++) {
4796 vport = &hdev->vport[i];
4797 vport->txvlan_cfg.accept_tag1 = true;
4798 vport->txvlan_cfg.accept_untag1 = true;
4799
4800 /* accept_tag2 and accept_untag2 are not supported on
4801 * pdev revision 0x20; newer revisions support them. Setting
4802 * these two fields does not return an error when the driver
4803 * sends the command to firmware on revision 0x20, but they
4804 * cannot be configured by the user.
4805 */
4806 vport->txvlan_cfg.accept_tag2 = true;
4807 vport->txvlan_cfg.accept_untag2 = true;
4808
4809 vport->txvlan_cfg.insert_tag1_en = false;
4810 vport->txvlan_cfg.insert_tag2_en = false;
4811 vport->txvlan_cfg.default_tag1 = 0;
4812 vport->txvlan_cfg.default_tag2 = 0;
4813
4814 ret = hclge_set_vlan_tx_offload_cfg(vport);
4815 if (ret)
4816 return ret;
4817
4818 vport->rxvlan_cfg.strip_tag1_en = false;
4819 vport->rxvlan_cfg.strip_tag2_en = true;
4820 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
4821 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
4822
4823 ret = hclge_set_vlan_rx_offload_cfg(vport);
4824 if (ret)
4825 return ret;
4826 }
4827
4828 handle = &hdev->vport[0].nic;
4829 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
4830 }
4831
4832 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
4833 {
4834 struct hclge_vport *vport = hclge_get_vport(handle);
4835
4836 vport->rxvlan_cfg.strip_tag1_en = false;
4837 vport->rxvlan_cfg.strip_tag2_en = enable;
4838 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
4839 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
4840
4841 return hclge_set_vlan_rx_offload_cfg(vport);
4842 }
4843
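/* hclge_set_mac_mtu - convert the requested MTU into a maximum frame
 * size (MTU plus Ethernet header, FCS and one VLAN tag), validate it
 * against the MAC limits, raise it to at least HCLGE_MAC_DEFAULT_FRAME
 * and write it to firmware.
 */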
4844 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
4845 {
4846 struct hclge_config_max_frm_size_cmd *req;
4847 struct hclge_desc desc;
4848 int max_frm_size;
4849 int ret;
4850
4851 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4852
4853 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
4854 max_frm_size > HCLGE_MAC_MAX_FRAME)
4855 return -EINVAL;
4856
4857 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
4858
4859 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4860
4861 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
4862 req->max_frm_size = cpu_to_le16(max_frm_size);
4863
4864 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4865 if (ret) {
4866 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
4867 return ret;
4868 }
4869
4870 hdev->mps = max_frm_size;
4871
4872 return 0;
4873 }
4874
4875 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4876 {
4877 struct hclge_vport *vport = hclge_get_vport(handle);
4878 struct hclge_dev *hdev = vport->back;
4879 int ret;
4880
4881 ret = hclge_set_mac_mtu(hdev, new_mtu);
4882 if (ret) {
4883 dev_err(&hdev->pdev->dev,
4884 "Change mtu fail, ret =%d\n", ret);
4885 return ret;
4886 }
4887
4888 ret = hclge_buffer_alloc(hdev);
4889 if (ret)
4890 dev_err(&hdev->pdev->dev,
4891 "Allocate buffer fail, ret =%d\n", ret);
4892
4893 return ret;
4894 }
4895
4896 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
4897 bool enable)
4898 {
4899 struct hclge_reset_tqp_queue_cmd *req;
4900 struct hclge_desc desc;
4901 int ret;
4902
4903 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
4904
4905 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4906 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4907 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
4908
4909 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4910 if (ret) {
4911 dev_err(&hdev->pdev->dev,
4912 "Send tqp reset cmd error, status =%d\n", ret);
4913 return ret;
4914 }
4915
4916 return 0;
4917 }
4918
4919 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
4920 {
4921 struct hclge_reset_tqp_queue_cmd *req;
4922 struct hclge_desc desc;
4923 int ret;
4924
4925 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
4926
4927 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4928 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4929
4930 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4931 if (ret) {
4932 dev_err(&hdev->pdev->dev,
4933 "Get reset status error, status =%d\n", ret);
4934 return ret;
4935 }
4936
4937 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
4938 }
4939
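/* Map a queue id that is local to this handle to the device-global tqp
 * index expected by the queue reset commands.
 */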
4940 static u16 hclge_convert_handle_qid_global(struct hnae3_handle *handle,
4941 u16 queue_id)
4942 {
4943 struct hnae3_queue *queue;
4944 struct hclge_tqp *tqp;
4945
4946 queue = handle->kinfo.tqp[queue_id];
4947 tqp = container_of(queue, struct hclge_tqp, q);
4948
4949 return tqp->index;
4950 }
4951
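/* hclge_reset_tqp - reset a single queue: disable the tqp, assert the
 * queue soft reset, poll the ready bit at 20ms intervals for up to
 * HCLGE_TQP_RESET_TRY_TIMES tries, then deassert the reset. Skipped
 * while a device-level reset is being handled.
 */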
4952 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
4953 {
4954 struct hclge_vport *vport = hclge_get_vport(handle);
4955 struct hclge_dev *hdev = vport->back;
4956 int reset_try_times = 0;
4957 int reset_status;
4958 u16 queue_gid;
4959 int ret;
4960
4961 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4962 return;
4963
4964 queue_gid = hclge_convert_handle_qid_global(handle, queue_id);
4965
4966 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
4967 if (ret) {
4968 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
4969 return;
4970 }
4971
4972 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
4973 if (ret) {
4974 dev_warn(&hdev->pdev->dev,
4975 "Send reset tqp cmd fail, ret = %d\n", ret);
4976 return;
4977 }
4978
4979 reset_try_times = 0;
4980 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
4981 /* Wait for tqp hw reset */
4982 msleep(20);
4983 reset_status = hclge_get_reset_status(hdev, queue_gid);
4984 if (reset_status)
4985 break;
4986 }
4987
4988 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
4989 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
4990 return;
4991 }
4992
4993 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
4994 if (ret) {
4995 dev_warn(&hdev->pdev->dev,
4996 "Deassert the soft reset fail, ret = %d\n", ret);
4997 return;
4998 }
4999 }
5000
5001 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
5002 {
5003 struct hclge_dev *hdev = vport->back;
5004 int reset_try_times = 0;
5005 int reset_status;
5006 u16 queue_gid;
5007 int ret;
5008
5009 queue_gid = hclge_convert_handle_qid_global(&vport->nic, queue_id);
5010
5011 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5012 if (ret) {
5013 dev_warn(&hdev->pdev->dev,
5014 "Send reset tqp cmd fail, ret = %d\n", ret);
5015 return;
5016 }
5017
5018 reset_try_times = 0;
5019 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5020 /* Wait for tqp hw reset */
5021 msleep(20);
5022 reset_status = hclge_get_reset_status(hdev, queue_gid);
5023 if (reset_status)
5024 break;
5025 }
5026
5027 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5028 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5029 return;
5030 }
5031
5032 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5033 if (ret)
5034 dev_warn(&hdev->pdev->dev,
5035 "Deassert the soft reset fail, ret = %d\n", ret);
5036 }
5037
5038 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
5039 {
5040 struct hclge_vport *vport = hclge_get_vport(handle);
5041 struct hclge_dev *hdev = vport->back;
5042
5043 return hdev->fw_version;
5044 }
5045
5046 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
5047 u32 *flowctrl_adv)
5048 {
5049 struct hclge_vport *vport = hclge_get_vport(handle);
5050 struct hclge_dev *hdev = vport->back;
5051 struct phy_device *phydev = hdev->hw.mac.phydev;
5052
5053 if (!phydev)
5054 return;
5055
5056 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
5057 (phydev->advertising & ADVERTISED_Asym_Pause);
5058 }
5059
5060 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5061 {
5062 struct phy_device *phydev = hdev->hw.mac.phydev;
5063
5064 if (!phydev)
5065 return;
5066
5067 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
5068
5069 if (rx_en)
5070 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
5071
5072 if (tx_en)
5073 phydev->advertising ^= ADVERTISED_Asym_Pause;
5074 }
5075
5076 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5077 {
5078 int ret;
5079
5080 if (rx_en && tx_en)
5081 hdev->fc_mode_last_time = HCLGE_FC_FULL;
5082 else if (rx_en && !tx_en)
5083 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
5084 else if (!rx_en && tx_en)
5085 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
5086 else
5087 hdev->fc_mode_last_time = HCLGE_FC_NONE;
5088
5089 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
5090 return 0;
5091
5092 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
5093 if (ret) {
5094 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
5095 ret);
5096 return ret;
5097 }
5098
5099 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
5100
5101 return 0;
5102 }
5103
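/* hclge_cfg_flowctrl - resolve the pause configuration negotiated by
 * the phy: local and link-partner pause/asym-pause bits are combined
 * via mii_resolve_flowctrl_fdx(), half duplex forces pause off, and
 * the result is applied through hclge_cfg_pauseparam().
 */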
5104 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
5105 {
5106 struct phy_device *phydev = hdev->hw.mac.phydev;
5107 u16 remote_advertising = 0;
5108 u16 local_advertising = 0;
5109 u32 rx_pause, tx_pause;
5110 u8 flowctl;
5111
5112 if (!phydev->link || !phydev->autoneg)
5113 return 0;
5114
5115 if (phydev->advertising & ADVERTISED_Pause)
5116 local_advertising = ADVERTISE_PAUSE_CAP;
5117
5118 if (phydev->advertising & ADVERTISED_Asym_Pause)
5119 local_advertising |= ADVERTISE_PAUSE_ASYM;
5120
5121 if (phydev->pause)
5122 remote_advertising = LPA_PAUSE_CAP;
5123
5124 if (phydev->asym_pause)
5125 remote_advertising |= LPA_PAUSE_ASYM;
5126
5127 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
5128 remote_advertising);
5129 tx_pause = flowctl & FLOW_CTRL_TX;
5130 rx_pause = flowctl & FLOW_CTRL_RX;
5131
5132 if (phydev->duplex == HCLGE_MAC_HALF) {
5133 tx_pause = 0;
5134 rx_pause = 0;
5135 }
5136
5137 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
5138 }
5139
5140 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
5141 u32 *rx_en, u32 *tx_en)
5142 {
5143 struct hclge_vport *vport = hclge_get_vport(handle);
5144 struct hclge_dev *hdev = vport->back;
5145
5146 *auto_neg = hclge_get_autoneg(handle);
5147
5148 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
5149 *rx_en = 0;
5150 *tx_en = 0;
5151 return;
5152 }
5153
5154 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
5155 *rx_en = 1;
5156 *tx_en = 0;
5157 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
5158 *tx_en = 1;
5159 *rx_en = 0;
5160 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
5161 *rx_en = 1;
5162 *tx_en = 1;
5163 } else {
5164 *rx_en = 0;
5165 *tx_en = 0;
5166 }
5167 }
5168
5169 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
5170 u32 rx_en, u32 tx_en)
5171 {
5172 struct hclge_vport *vport = hclge_get_vport(handle);
5173 struct hclge_dev *hdev = vport->back;
5174 struct phy_device *phydev = hdev->hw.mac.phydev;
5175 u32 fc_autoneg;
5176
5177 fc_autoneg = hclge_get_autoneg(handle);
5178 if (auto_neg != fc_autoneg) {
5179 dev_info(&hdev->pdev->dev,
5180 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
5181 return -EOPNOTSUPP;
5182 }
5183
5184 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
5185 dev_info(&hdev->pdev->dev,
5186 "Priority flow control enabled. Cannot set link flow control.\n");
5187 return -EOPNOTSUPP;
5188 }
5189
5190 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
5191
5192 if (!fc_autoneg)
5193 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
5194
5195 /* Only support flow control negotiation for netdev with
5196 * phy attached for now.
5197 */
5198 if (!phydev)
5199 return -EOPNOTSUPP;
5200
5201 return phy_start_aneg(phydev);
5202 }
5203
5204 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
5205 u8 *auto_neg, u32 *speed, u8 *duplex)
5206 {
5207 struct hclge_vport *vport = hclge_get_vport(handle);
5208 struct hclge_dev *hdev = vport->back;
5209
5210 if (speed)
5211 *speed = hdev->hw.mac.speed;
5212 if (duplex)
5213 *duplex = hdev->hw.mac.duplex;
5214 if (auto_neg)
5215 *auto_neg = hdev->hw.mac.autoneg;
5216 }
5217
5218 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
5219 {
5220 struct hclge_vport *vport = hclge_get_vport(handle);
5221 struct hclge_dev *hdev = vport->back;
5222
5223 if (media_type)
5224 *media_type = hdev->hw.mac.media_type;
5225 }
5226
5227 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
5228 u8 *tp_mdix_ctrl, u8 *tp_mdix)
5229 {
5230 struct hclge_vport *vport = hclge_get_vport(handle);
5231 struct hclge_dev *hdev = vport->back;
5232 struct phy_device *phydev = hdev->hw.mac.phydev;
5233 int mdix_ctrl, mdix, retval, is_resolved;
5234
5235 if (!phydev) {
5236 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
5237 *tp_mdix = ETH_TP_MDI_INVALID;
5238 return;
5239 }
5240
5241 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
5242
5243 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
5244 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
5245 HCLGE_PHY_MDIX_CTRL_S);
5246
5247 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
5248 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
5249 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
5250
5251 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
5252
5253 switch (mdix_ctrl) {
5254 case 0x0:
5255 *tp_mdix_ctrl = ETH_TP_MDI;
5256 break;
5257 case 0x1:
5258 *tp_mdix_ctrl = ETH_TP_MDI_X;
5259 break;
5260 case 0x3:
5261 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
5262 break;
5263 default:
5264 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
5265 break;
5266 }
5267
5268 if (!is_resolved)
5269 *tp_mdix = ETH_TP_MDI_INVALID;
5270 else if (mdix)
5271 *tp_mdix = ETH_TP_MDI_X;
5272 else
5273 *tp_mdix = ETH_TP_MDI;
5274 }
5275
5276 static int hclge_init_client_instance(struct hnae3_client *client,
5277 struct hnae3_ae_dev *ae_dev)
5278 {
5279 struct hclge_dev *hdev = ae_dev->priv;
5280 struct hclge_vport *vport;
5281 int i, ret;
5282
5283 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
5284 vport = &hdev->vport[i];
5285
5286 switch (client->type) {
5287 case HNAE3_CLIENT_KNIC:
5288
5289 hdev->nic_client = client;
5290 vport->nic.client = client;
5291 ret = client->ops->init_instance(&vport->nic);
5292 if (ret)
5293 return ret;
5294
5295 if (hdev->roce_client &&
5296 hnae3_dev_roce_supported(hdev)) {
5297 struct hnae3_client *rc = hdev->roce_client;
5298
5299 ret = hclge_init_roce_base_info(vport);
5300 if (ret)
5301 return ret;
5302
5303 ret = rc->ops->init_instance(&vport->roce);
5304 if (ret)
5305 return ret;
5306 }
5307
5308 break;
5309 case HNAE3_CLIENT_UNIC:
5310 hdev->nic_client = client;
5311 vport->nic.client = client;
5312
5313 ret = client->ops->init_instance(&vport->nic);
5314 if (ret)
5315 return ret;
5316
5317 break;
5318 case HNAE3_CLIENT_ROCE:
5319 if (hnae3_dev_roce_supported(hdev)) {
5320 hdev->roce_client = client;
5321 vport->roce.client = client;
5322 }
5323
5324 if (hdev->roce_client && hdev->nic_client) {
5325 ret = hclge_init_roce_base_info(vport);
5326 if (ret)
5327 return ret;
5328
5329 ret = client->ops->init_instance(&vport->roce);
5330 if (ret)
5331 return ret;
5332 }
5333 }
5334 }
5335
5336 return 0;
5337 }
5338
5339 static void hclge_uninit_client_instance(struct hnae3_client *client,
5340 struct hnae3_ae_dev *ae_dev)
5341 {
5342 struct hclge_dev *hdev = ae_dev->priv;
5343 struct hclge_vport *vport;
5344 int i;
5345
5346 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
5347 vport = &hdev->vport[i];
5348 if (hdev->roce_client) {
5349 hdev->roce_client->ops->uninit_instance(&vport->roce,
5350 0);
5351 hdev->roce_client = NULL;
5352 vport->roce.client = NULL;
5353 }
5354 if (client->type == HNAE3_CLIENT_ROCE)
5355 return;
5356 if (client->ops->uninit_instance) {
5357 client->ops->uninit_instance(&vport->nic, 0);
5358 hdev->nic_client = NULL;
5359 vport->nic.client = NULL;
5360 }
5361 }
5362 }
5363
5364 static int hclge_pci_init(struct hclge_dev *hdev)
5365 {
5366 struct pci_dev *pdev = hdev->pdev;
5367 struct hclge_hw *hw;
5368 int ret;
5369
5370 ret = pci_enable_device(pdev);
5371 if (ret) {
5372 dev_err(&pdev->dev, "failed to enable PCI device\n");
5373 return ret;
5374 }
5375
5376 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5377 if (ret) {
5378 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5379 if (ret) {
5380 dev_err(&pdev->dev,
5381 "can't set consistent PCI DMA");
5382 goto err_disable_device;
5383 }
5384 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
5385 }
5386
5387 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
5388 if (ret) {
5389 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
5390 goto err_disable_device;
5391 }
5392
5393 pci_set_master(pdev);
5394 hw = &hdev->hw;
5395 hw->back = hdev;
5396 hw->io_base = pcim_iomap(pdev, 2, 0);
5397 if (!hw->io_base) {
5398 dev_err(&pdev->dev, "Can't map configuration register space\n");
5399 ret = -ENOMEM;
5400 goto err_clr_master;
5401 }
5402
5403 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
5404
5405 return 0;
5406 err_clr_master:
5407 pci_clear_master(pdev);
5408 pci_release_regions(pdev);
5409 err_disable_device:
5410 pci_disable_device(pdev);
5411
5412 return ret;
5413 }
5414
5415 static void hclge_pci_uninit(struct hclge_dev *hdev)
5416 {
5417 struct pci_dev *pdev = hdev->pdev;
5418
5419 pcim_iounmap(pdev, hdev->hw.io_base);
5420 pci_free_irq_vectors(pdev);
5421 pci_clear_master(pdev);
5422 pci_release_mem_regions(pdev);
5423 pci_disable_device(pdev);
5424 }
5425
5426 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5427 {
5428 struct pci_dev *pdev = ae_dev->pdev;
5429 struct hclge_dev *hdev;
5430 int ret;
5431
5432 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5433 if (!hdev) {
5434 ret = -ENOMEM;
5435 goto out;
5436 }
5437
5438 hdev->pdev = pdev;
5439 hdev->ae_dev = ae_dev;
5440 hdev->reset_type = HNAE3_NONE_RESET;
5441 hdev->reset_request = 0;
5442 hdev->reset_pending = 0;
5443 ae_dev->priv = hdev;
5444
5445 ret = hclge_pci_init(hdev);
5446 if (ret) {
5447 dev_err(&pdev->dev, "PCI init failed\n");
5448 goto out;
5449 }
5450
5451 /* Firmware command queue initialize */
5452 ret = hclge_cmd_queue_init(hdev);
5453 if (ret) {
5454 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
5455 goto err_pci_uninit;
5456 }
5457
5458 /* Firmware command initialize */
5459 ret = hclge_cmd_init(hdev);
5460 if (ret)
5461 goto err_cmd_uninit;
5462
5463 ret = hclge_get_cap(hdev);
5464 if (ret) {
5465 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5466 ret);
5467 goto err_cmd_uninit;
5468 }
5469
5470 ret = hclge_configure(hdev);
5471 if (ret) {
5472 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5473 goto err_cmd_uninit;
5474 }
5475
5476 ret = hclge_init_msi(hdev);
5477 if (ret) {
5478 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
5479 goto err_cmd_uninit;
5480 }
5481
5482 ret = hclge_misc_irq_init(hdev);
5483 if (ret) {
5484 dev_err(&pdev->dev,
5485 "Misc IRQ(vector0) init error, ret = %d.\n",
5486 ret);
5487 goto err_msi_uninit;
5488 }
5489
5490 ret = hclge_alloc_tqps(hdev);
5491 if (ret) {
5492 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
5493 goto err_msi_irq_uninit;
5494 }
5495
5496 ret = hclge_alloc_vport(hdev);
5497 if (ret) {
5498 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
5499 goto err_msi_irq_uninit;
5500 }
5501
5502 ret = hclge_map_tqp(hdev);
5503 if (ret) {
5504 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5505 goto err_msi_irq_uninit;
5506 }
5507
5508 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
5509 ret = hclge_mac_mdio_config(hdev);
5510 if (ret) {
5511 dev_err(&hdev->pdev->dev,
5512 "mdio config fail ret=%d\n", ret);
5513 goto err_msi_irq_uninit;
5514 }
5515 }
5516
5517 ret = hclge_mac_init(hdev);
5518 if (ret) {
5519 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5520 goto err_mdiobus_unreg;
5521 }
5522
5523 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5524 if (ret) {
5525 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5526 goto err_mdiobus_unreg;
5527 }
5528
5529 ret = hclge_init_vlan_config(hdev);
5530 if (ret) {
5531 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5532 goto err_mdiobus_unreg;
5533 }
5534
5535 ret = hclge_tm_schd_init(hdev);
5536 if (ret) {
5537 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
5538 goto err_mdiobus_unreg;
5539 }
5540
5541 hclge_rss_init_cfg(hdev);
5542 ret = hclge_rss_init_hw(hdev);
5543 if (ret) {
5544 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5545 goto err_mdiobus_unreg;
5546 }
5547
5548 ret = init_mgr_tbl(hdev);
5549 if (ret) {
5550 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
5551 goto err_mdiobus_unreg;
5552 }
5553
5554 hclge_dcb_ops_set(hdev);
5555
5556 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
5557 INIT_WORK(&hdev->service_task, hclge_service_task);
5558 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
5559 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
5560
5561 /* Enable MISC vector(vector0) */
5562 hclge_enable_vector(&hdev->misc_vector, true);
5563
5564 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
5565 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5566 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
5567 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5568 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
5569 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
5570
5571 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
5572 return 0;
5573
5574 err_mdiobus_unreg:
5575 if (hdev->hw.mac.phydev)
5576 mdiobus_unregister(hdev->hw.mac.mdio_bus);
5577 err_msi_irq_uninit:
5578 hclge_misc_irq_uninit(hdev);
5579 err_msi_uninit:
5580 pci_free_irq_vectors(pdev);
5581 err_cmd_uninit:
5582 hclge_destroy_cmd_queue(&hdev->hw);
5583 err_pci_uninit:
5584 pcim_iounmap(pdev, hdev->hw.io_base);
5585 pci_clear_master(pdev);
5586 pci_release_regions(pdev);
5587 pci_disable_device(pdev);
5588 out:
5589 return ret;
5590 }
5591
5592 static void hclge_stats_clear(struct hclge_dev *hdev)
5593 {
5594 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
5595 }
5596
5597 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
5598 {
5599 struct hclge_dev *hdev = ae_dev->priv;
5600 struct pci_dev *pdev = ae_dev->pdev;
5601 int ret;
5602
5603 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5604
5605 hclge_stats_clear(hdev);
5606 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
5607
5608 ret = hclge_cmd_init(hdev);
5609 if (ret) {
5610 dev_err(&pdev->dev, "Cmd queue init failed\n");
5611 return ret;
5612 }
5613
5614 ret = hclge_get_cap(hdev);
5615 if (ret) {
5616 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5617 ret);
5618 return ret;
5619 }
5620
5621 ret = hclge_configure(hdev);
5622 if (ret) {
5623 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5624 return ret;
5625 }
5626
5627 ret = hclge_map_tqp(hdev);
5628 if (ret) {
5629 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5630 return ret;
5631 }
5632
5633 ret = hclge_mac_init(hdev);
5634 if (ret) {
5635 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5636 return ret;
5637 }
5638
5639 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5640 if (ret) {
5641 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5642 return ret;
5643 }
5644
5645 ret = hclge_init_vlan_config(hdev);
5646 if (ret) {
5647 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5648 return ret;
5649 }
5650
5651 ret = hclge_tm_init_hw(hdev);
5652 if (ret) {
5653 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
5654 return ret;
5655 }
5656
5657 ret = hclge_rss_init_hw(hdev);
5658 if (ret) {
5659 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5660 return ret;
5661 }
5662
5663 /* Enable MISC vector(vector0) */
5664 hclge_enable_vector(&hdev->misc_vector, true);
5665
5666 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
5667 HCLGE_DRIVER_NAME);
5668
5669 return 0;
5670 }
5671
5672 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
5673 {
5674 struct hclge_dev *hdev = ae_dev->priv;
5675 struct hclge_mac *mac = &hdev->hw.mac;
5676
5677 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5678
5679 if (hdev->service_timer.function)
5680 del_timer_sync(&hdev->service_timer);
5681 if (hdev->service_task.func)
5682 cancel_work_sync(&hdev->service_task);
5683 if (hdev->rst_service_task.func)
5684 cancel_work_sync(&hdev->rst_service_task);
5685 if (hdev->mbx_service_task.func)
5686 cancel_work_sync(&hdev->mbx_service_task);
5687
5688 if (mac->phydev)
5689 mdiobus_unregister(mac->mdio_bus);
5690
5691 /* Disable MISC vector(vector0) */
5692 hclge_enable_vector(&hdev->misc_vector, false);
5693 hclge_destroy_cmd_queue(&hdev->hw);
5694 hclge_misc_irq_uninit(hdev);
5695 hclge_pci_uninit(hdev);
5696 ae_dev->priv = NULL;
5697 }
5698
5699 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
5700 {
5701 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5702 struct hclge_vport *vport = hclge_get_vport(handle);
5703 struct hclge_dev *hdev = vport->back;
5704
5705 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
5706 }
5707
5708 static void hclge_get_channels(struct hnae3_handle *handle,
5709 struct ethtool_channels *ch)
5710 {
5711 struct hclge_vport *vport = hclge_get_vport(handle);
5712
5713 ch->max_combined = hclge_get_max_channels(handle);
5714 ch->other_count = 1;
5715 ch->max_other = 1;
5716 ch->combined_count = vport->alloc_tqps;
5717 }
5718
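/* Return the number of TQPs not yet allocated to any vport and the
 * per-TC RSS size ceiling, so the caller can bound a channel resize.
 */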
5719 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5720 u16 *free_tqps, u16 *max_rss_size)
5721 {
5722 struct hclge_vport *vport = hclge_get_vport(handle);
5723 struct hclge_dev *hdev = vport->back;
5724 u16 temp_tqps = 0;
5725 int i;
5726
5727 for (i = 0; i < hdev->num_tqps; i++) {
5728 if (!hdev->htqp[i].alloced)
5729 temp_tqps++;
5730 }
5731 *free_tqps = temp_tqps;
5732 *max_rss_size = hdev->rss_size_max;
5733 }
5734
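/* Detach and mark free every TQP currently owned by the vport, then
 * drop the kinfo->tqp pointer array so a subsequent hclge_knic_setup()
 * can rebuild it with the new queue count.
 */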
5735 static void hclge_release_tqp(struct hclge_vport *vport)
5736 {
5737 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5738 struct hclge_dev *hdev = vport->back;
5739 int i;
5740
5741 for (i = 0; i < kinfo->num_tqps; i++) {
5742 struct hclge_tqp *tqp =
5743 container_of(kinfo->tqp[i], struct hclge_tqp, q);
5744
5745 tqp->q.handle = NULL;
5746 tqp->q.tqp_index = 0;
5747 tqp->alloced = false;
5748 }
5749
5750 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
5751 kinfo->tqp = NULL;
5752 }
5753
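/* hclge_set_channels - resize the vport to new_tqps_num queue pairs.
 * Releases the old TQPs, re-runs NIC setup, vport-TQP mapping and TM
 * scheduler init, then rewrites the RSS TC mode and re-spreads the RSS
 * indirection table across the new rss_size.
 */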
5754 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
5755 {
5756 struct hclge_vport *vport = hclge_get_vport(handle);
5757 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5758 struct hclge_dev *hdev = vport->back;
5759 int cur_rss_size = kinfo->rss_size;
5760 int cur_tqps = kinfo->num_tqps;
5761 u16 tc_offset[HCLGE_MAX_TC_NUM];
5762 u16 tc_valid[HCLGE_MAX_TC_NUM];
5763 u16 tc_size[HCLGE_MAX_TC_NUM];
5764 u16 roundup_size;
5765 u32 *rss_indir;
5766 int ret, i;
5767
5768 hclge_release_tqp(vport);
5769
5770 ret = hclge_knic_setup(vport, new_tqps_num);
5771 if (ret) {
5772 dev_err(&hdev->pdev->dev, "setup nic fail, ret = %d\n", ret);
5773 return ret;
5774 }
5775
5776 ret = hclge_map_tqp_to_vport(hdev, vport);
5777 if (ret) {
5778 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret = %d\n", ret);
5779 return ret;
5780 }
5781
5782 ret = hclge_tm_schd_init(hdev);
5783 if (ret) {
5784 dev_err(&hdev->pdev->dev, "tm schd init fail, ret = %d\n", ret);
5785 return ret;
5786 }
5787
5788 roundup_size = roundup_pow_of_two(kinfo->rss_size);
5789 roundup_size = ilog2(roundup_size);
5790 /* Set the RSS TC mode according to the new RSS size */
5791 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5792 tc_valid[i] = 0;
5793
5794 if (!(hdev->hw_tc_map & BIT(i)))
5795 continue;
5796
5797 tc_valid[i] = 1;
5798 tc_size[i] = roundup_size;
5799 tc_offset[i] = kinfo->rss_size * i;
5800 }
5801 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5802 if (ret)
5803 return ret;
5804
5805 /* Reinitialize the RSS indirection table according to the new RSS size */
5806 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
5807 if (!rss_indir)
5808 return -ENOMEM;
5809
5810 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
5811 rss_indir[i] = i % kinfo->rss_size;
5812
5813 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
5814 if (ret)
5815 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
5816 ret);
5817
5818 kfree(rss_indir);
5819
5820 if (!ret)
5821 dev_info(&hdev->pdev->dev,
5822 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
5823 cur_rss_size, kinfo->rss_size,
5824 cur_tqps, kinfo->rss_size * kinfo->num_tc);
5825
5826 return ret;
5827 }
5828
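/* Query firmware for how many 32-bit and 64-bit registers a register
 * dump will contain; both counts come back in a single descriptor.
 */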
5829 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
5830 u32 *regs_num_64_bit)
5831 {
5832 struct hclge_desc desc;
5833 u32 total_num;
5834 int ret;
5835
5836 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
5837 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5838 if (ret) {
5839 dev_err(&hdev->pdev->dev,
5840 "Query register number cmd failed, ret = %d.\n", ret);
5841 return ret;
5842 }
5843
5844 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
5845 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
5846
5847 total_num = *regs_num_32_bit + *regs_num_64_bit;
5848 if (!total_num)
5849 return -EINVAL;
5850
5851 return 0;
5852 }
5853
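/* Read regs_num 32-bit registers into @data. The first descriptor's two
 * header words are a real command header, so it carries only six values;
 * later descriptors are treated as eight raw data words each, hence the
 * "+ 2" when computing cmd_num.
 */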
5854 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
5855 void *data)
5856 {
5857 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
5858
5859 struct hclge_desc *desc;
5860 u32 *reg_val = data;
5861 __le32 *desc_data;
5862 int cmd_num;
5863 int i, k, n;
5864 int ret;
5865
5866 if (regs_num == 0)
5867 return 0;
5868
5869 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
5870 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
5871 if (!desc)
5872 return -ENOMEM;
5873
5874 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
5875 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
5876 if (ret) {
5877 dev_err(&hdev->pdev->dev,
5878 "Query 32 bit register cmd failed, ret = %d.\n", ret);
5879 kfree(desc);
5880 return ret;
5881 }
5882
5883 for (i = 0; i < cmd_num; i++) {
5884 if (i == 0) {
5885 desc_data = (__le32 *)(&desc[i].data[0]);
5886 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
5887 } else {
5888 desc_data = (__le32 *)(&desc[i]);
5889 n = HCLGE_32_BIT_REG_RTN_DATANUM;
5890 }
5891 for (k = 0; k < n; k++) {
5892 *reg_val++ = le32_to_cpu(*desc_data++);
5893
5894 regs_num--;
5895 if (!regs_num)
5896 break;
5897 }
5898 }
5899
5900 kfree(desc);
5901 return 0;
5902 }
5903
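/* Read regs_num 64-bit registers into @data. Same layout trick as the
 * 32-bit variant: the first descriptor loses one 64-bit slot to the
 * command header, later descriptors are four raw 64-bit words each.
 */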
5904 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
5905 void *data)
5906 {
5907 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
5908
5909 struct hclge_desc *desc;
5910 u64 *reg_val = data;
5911 __le64 *desc_data;
5912 int cmd_num;
5913 int i, k, n;
5914 int ret;
5915
5916 if (regs_num == 0)
5917 return 0;
5918
5919 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
5920 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
5921 if (!desc)
5922 return -ENOMEM;
5923
5924 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
5925 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
5926 if (ret) {
5927 dev_err(&hdev->pdev->dev,
5928 "Query 64 bit register cmd failed, ret = %d.\n", ret);
5929 kfree(desc);
5930 return ret;
5931 }
5932
5933 for (i = 0; i < cmd_num; i++) {
5934 if (i == 0) {
5935 desc_data = (__le64 *)(&desc[i].data[0]);
5936 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
5937 } else {
5938 desc_data = (__le64 *)(&desc[i]);
5939 n = HCLGE_64_BIT_REG_RTN_DATANUM;
5940 }
5941 for (k = 0; k < n; k++) {
5942 *reg_val++ = le64_to_cpu(*desc_data++);
5943
5944 regs_num--;
5945 if (!regs_num)
5946 break;
5947 }
5948 }
5949
5950 kfree(desc);
5951 return 0;
5952 }
5953
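/* ethtool register-dump length: 32-bit register count * 4 bytes plus
 * 64-bit register count * 8 bytes.
 */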
5954 static int hclge_get_regs_len(struct hnae3_handle *handle)
5955 {
5956 struct hclge_vport *vport = hclge_get_vport(handle);
5957 struct hclge_dev *hdev = vport->back;
5958 u32 regs_num_32_bit, regs_num_64_bit;
5959 int ret;
5960
5961 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
5962 if (ret) {
5963 dev_err(&hdev->pdev->dev,
5964 "Get register number failed, ret = %d.\n", ret);
5965 return -EOPNOTSUPP;
5966 }
5967
5968 return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
5969 }
5970
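/* Fill the ethtool register dump: all 32-bit registers first, followed
 * by the 64-bit registers, with the firmware version as dump version.
 */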
5971 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
5972 void *data)
5973 {
5974 struct hclge_vport *vport = hclge_get_vport(handle);
5975 struct hclge_dev *hdev = vport->back;
5976 u32 regs_num_32_bit, regs_num_64_bit;
5977 int ret;
5978
5979 *version = hdev->fw_version;
5980
5981 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
5982 if (ret) {
5983 dev_err(&hdev->pdev->dev,
5984 "Get register number failed, ret = %d.\n", ret);
5985 return;
5986 }
5987
5988 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
5989 if (ret) {
5990 dev_err(&hdev->pdev->dev,
5991 "Get 32 bit register failed, ret = %d.\n", ret);
5992 return;
5993 }
5994
5995 data = (u32 *)data + regs_num_32_bit;
5996 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
5997 data);
5998 if (ret)
5999 dev_err(&hdev->pdev->dev,
6000 "Get 64 bit register failed, ret = %d.\n", ret);
6001 }
6002
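/* Drive the locate LED on or off via the LED-status config command. */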
6003 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
6004 {
6005 struct hclge_set_led_state_cmd *req;
6006 struct hclge_desc desc;
6007 int ret;
6008
6009 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
6010
6011 req = (struct hclge_set_led_state_cmd *)desc.data;
6012 hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
6013 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
6014
6015 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6016 if (ret)
6017 dev_err(&hdev->pdev->dev,
6018 "Send set led state cmd error, ret = %d\n", ret);
6019
6020 return ret;
6021 }
6022
6023 enum hclge_led_status {
6024 HCLGE_LED_OFF,
6025 HCLGE_LED_ON,
6026 HCLGE_LED_NO_CHANGE = 0xFF,
6027 };
6028
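/* ethtool set_phys_id hook: turn the locate LED on for
 * ETHTOOL_ID_ACTIVE and off for ETHTOOL_ID_INACTIVE.
 */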
6029 static int hclge_set_led_id(struct hnae3_handle *handle,
6030 enum ethtool_phys_id_state status)
6031 {
6032 struct hclge_vport *vport = hclge_get_vport(handle);
6033 struct hclge_dev *hdev = vport->back;
6034
6035 switch (status) {
6036 case ETHTOOL_ID_ACTIVE:
6037 return hclge_set_led_status(hdev, HCLGE_LED_ON);
6038 case ETHTOOL_ID_INACTIVE:
6039 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
6040 default:
6041 return -EINVAL;
6042 }
6043 }
6044
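/* Copy the MAC's supported/advertised link-mode masks for ethtool. */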
6045 static void hclge_get_link_mode(struct hnae3_handle *handle,
6046 unsigned long *supported,
6047 unsigned long *advertising)
6048 {
6049 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
6050 struct hclge_vport *vport = hclge_get_vport(handle);
6051 struct hclge_dev *hdev = vport->back;
6052 unsigned int idx;
6053
6054 for (idx = 0; idx < size; idx++) {
6055 supported[idx] = hdev->hw.mac.supported[idx];
6056 advertising[idx] = hdev->hw.mac.advertising[idx];
6057 }
6058 }
6059
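/* Map the MAC media type to the corresponding ethtool PORT_* type. */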
6060 static void hclge_get_port_type(struct hnae3_handle *handle,
6061 u8 *port_type)
6062 {
6063 struct hclge_vport *vport = hclge_get_vport(handle);
6064 struct hclge_dev *hdev = vport->back;
6065 u8 media_type = hdev->hw.mac.media_type;
6066
6067 switch (media_type) {
6068 case HNAE3_MEDIA_TYPE_FIBER:
6069 *port_type = PORT_FIBRE;
6070 break;
6071 case HNAE3_MEDIA_TYPE_COPPER:
6072 *port_type = PORT_TP;
6073 break;
6074 case HNAE3_MEDIA_TYPE_UNKNOWN:
6075 default:
6076 *port_type = PORT_OTHER;
6077 break;
6078 }
6079 }
6080
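/* PF-side implementation of the hnae3 ae_ops interface. */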
6081 static const struct hnae3_ae_ops hclge_ops = {
6082 .init_ae_dev = hclge_init_ae_dev,
6083 .uninit_ae_dev = hclge_uninit_ae_dev,
6084 .init_client_instance = hclge_init_client_instance,
6085 .uninit_client_instance = hclge_uninit_client_instance,
6086 .map_ring_to_vector = hclge_map_ring_to_vector,
6087 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
6088 .get_vector = hclge_get_vector,
6089 .put_vector = hclge_put_vector,
6090 .set_promisc_mode = hclge_set_promisc_mode,
6091 .set_loopback = hclge_set_loopback,
6092 .start = hclge_ae_start,
6093 .stop = hclge_ae_stop,
6094 .get_status = hclge_get_status,
6095 .get_ksettings_an_result = hclge_get_ksettings_an_result,
6096 .update_speed_duplex_h = hclge_update_speed_duplex_h,
6097 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
6098 .get_media_type = hclge_get_media_type,
6099 .get_rss_key_size = hclge_get_rss_key_size,
6100 .get_rss_indir_size = hclge_get_rss_indir_size,
6101 .get_rss = hclge_get_rss,
6102 .set_rss = hclge_set_rss,
6103 .set_rss_tuple = hclge_set_rss_tuple,
6104 .get_rss_tuple = hclge_get_rss_tuple,
6105 .get_tc_size = hclge_get_tc_size,
6106 .get_mac_addr = hclge_get_mac_addr,
6107 .set_mac_addr = hclge_set_mac_addr,
6108 .add_uc_addr = hclge_add_uc_addr,
6109 .rm_uc_addr = hclge_rm_uc_addr,
6110 .add_mc_addr = hclge_add_mc_addr,
6111 .rm_mc_addr = hclge_rm_mc_addr,
6112 .set_autoneg = hclge_set_autoneg,
6113 .get_autoneg = hclge_get_autoneg,
6114 .get_pauseparam = hclge_get_pauseparam,
6115 .set_pauseparam = hclge_set_pauseparam,
6116 .set_mtu = hclge_set_mtu,
6117 .reset_queue = hclge_reset_tqp,
6118 .get_stats = hclge_get_stats,
6119 .update_stats = hclge_update_stats,
6120 .get_strings = hclge_get_strings,
6121 .get_sset_count = hclge_get_sset_count,
6122 .get_fw_version = hclge_get_fw_version,
6123 .get_mdix_mode = hclge_get_mdix_mode,
6124 .enable_vlan_filter = hclge_enable_vlan_filter,
6125 .set_vlan_filter = hclge_set_vlan_filter,
6126 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
6127 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
6128 .reset_event = hclge_reset_event,
6129 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
6130 .set_channels = hclge_set_channels,
6131 .get_channels = hclge_get_channels,
6132 .get_flowctrl_adv = hclge_get_flowctrl_adv,
6133 .get_regs_len = hclge_get_regs_len,
6134 .get_regs = hclge_get_regs,
6135 .set_led_id = hclge_set_led_id,
6136 .get_link_mode = hclge_get_link_mode,
6137 .get_port_type = hclge_get_port_type,
6138 };
6139
6140 static struct hnae3_ae_algo ae_algo = {
6141 .ops = &hclge_ops,
6142 .name = HCLGE_NAME,
6143 .pdev_id_table = ae_algo_pci_tbl,
6144 };
6145
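/* Module entry points: register/unregister this algo with the hnae3
 * framework, which matches it against devices via ae_algo_pci_tbl.
 */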
6146 static int hclge_init(void)
6147 {
6148 pr_info("%s is initializing\n", HCLGE_NAME);
6149
6150 hnae3_register_ae_algo(&ae_algo);
6151
6152 return 0;
6153 }
6154
6155 static void hclge_exit(void)
6156 {
6157 hnae3_unregister_ae_algo(&ae_algo);
6158 }
6159 module_init(hclge_init);
6160 module_exit(hclge_exit);
6161
6162 MODULE_LICENSE("GPL");
6163 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
6164 MODULE_DESCRIPTION("HCLGE Driver");
6165 MODULE_VERSION(HCLGE_MOD_VERSION);