1 /*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <linux/acpi.h>
11 #include <linux/device.h>
12 #include <linux/etherdevice.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
20
21 #include "hclge_cmd.h"
22 #include "hclge_main.h"
23 #include "hclge_mdio.h"
24 #include "hclge_tm.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
31 #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
32
33 static int hclge_rss_init_hw(struct hclge_dev *hdev);
34 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
35 enum hclge_mta_dmac_sel_type mta_mac_sel,
36 bool enable);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38
39 static struct hnae3_ae_algo ae_algo;
40
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* Required last entry */
50 {0, }
51 };
52
53 static const struct pci_device_id roce_pci_tbl[] = {
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
59 /* Required last entry */
60 {0, }
61 };
62
63 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
64 "Mac Loopback test",
65 "Serdes Loopback test",
66 "Phy Loopback test"
67 };
68
69 static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
70 {"igu_rx_oversize_pkt",
71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
72 {"igu_rx_undersize_pkt",
73 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
74 {"igu_rx_out_all_pkt",
75 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
76 {"igu_rx_uni_pkt",
77 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
78 {"igu_rx_multi_pkt",
79 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
80 {"igu_rx_broad_pkt",
81 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
82 {"egu_tx_out_all_pkt",
83 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
84 {"egu_tx_uni_pkt",
85 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
86 {"egu_tx_multi_pkt",
87 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
88 {"egu_tx_broad_pkt",
89 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
90 {"ssu_ppp_mac_key_num",
91 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
92 {"ssu_ppp_host_key_num",
93 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
94 {"ppp_ssu_mac_rlt_num",
95 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
96 {"ppp_ssu_host_rlt_num",
97 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
98 {"ssu_tx_in_num",
99 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
100 {"ssu_tx_out_num",
101 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
102 {"ssu_rx_in_num",
103 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
104 {"ssu_rx_out_num",
105 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
106 };
107
108 static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
109 {"igu_rx_err_pkt",
110 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
111 {"igu_rx_no_eof_pkt",
112 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
113 {"igu_rx_no_sof_pkt",
114 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
115 {"egu_tx_1588_pkt",
116 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
117 {"ssu_full_drop_num",
118 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
119 {"ssu_part_drop_num",
120 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
121 {"ppp_key_drop_num",
122 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
123 {"ppp_rlt_drop_num",
124 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
125 {"ssu_key_drop_num",
126 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
127 {"pkt_curr_buf_cnt",
128 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
129 {"qcn_fb_rcv_cnt",
130 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
131 {"qcn_fb_drop_cnt",
132 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
133 {"qcn_fb_invaild_cnt",
134 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
135 {"rx_packet_tc0_in_cnt",
136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
137 {"rx_packet_tc1_in_cnt",
138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
139 {"rx_packet_tc2_in_cnt",
140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
141 {"rx_packet_tc3_in_cnt",
142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
143 {"rx_packet_tc4_in_cnt",
144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
145 {"rx_packet_tc5_in_cnt",
146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
147 {"rx_packet_tc6_in_cnt",
148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
149 {"rx_packet_tc7_in_cnt",
150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
151 {"rx_packet_tc0_out_cnt",
152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
153 {"rx_packet_tc1_out_cnt",
154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
155 {"rx_packet_tc2_out_cnt",
156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
157 {"rx_packet_tc3_out_cnt",
158 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
159 {"rx_packet_tc4_out_cnt",
160 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
161 {"rx_packet_tc5_out_cnt",
162 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
163 {"rx_packet_tc6_out_cnt",
164 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
165 {"rx_packet_tc7_out_cnt",
166 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
167 {"tx_packet_tc0_in_cnt",
168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
169 {"tx_packet_tc1_in_cnt",
170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
171 {"tx_packet_tc2_in_cnt",
172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
173 {"tx_packet_tc3_in_cnt",
174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
175 {"tx_packet_tc4_in_cnt",
176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
177 {"tx_packet_tc5_in_cnt",
178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
179 {"tx_packet_tc6_in_cnt",
180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
181 {"tx_packet_tc7_in_cnt",
182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
183 {"tx_packet_tc0_out_cnt",
184 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
185 {"tx_packet_tc1_out_cnt",
186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
187 {"tx_packet_tc2_out_cnt",
188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
189 {"tx_packet_tc3_out_cnt",
190 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
191 {"tx_packet_tc4_out_cnt",
192 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
193 {"tx_packet_tc5_out_cnt",
194 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
195 {"tx_packet_tc6_out_cnt",
196 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
197 {"tx_packet_tc7_out_cnt",
198 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
199 {"pkt_curr_buf_tc0_cnt",
200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
201 {"pkt_curr_buf_tc1_cnt",
202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
203 {"pkt_curr_buf_tc2_cnt",
204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
205 {"pkt_curr_buf_tc3_cnt",
206 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
207 {"pkt_curr_buf_tc4_cnt",
208 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
209 {"pkt_curr_buf_tc5_cnt",
210 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
211 {"pkt_curr_buf_tc6_cnt",
212 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
213 {"pkt_curr_buf_tc7_cnt",
214 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
215 {"mb_uncopy_num",
216 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
217 {"lo_pri_unicast_rlt_drop_num",
218 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
219 {"hi_pri_multicast_rlt_drop_num",
220 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
221 {"lo_pri_multicast_rlt_drop_num",
222 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
223 {"rx_oq_drop_pkt_cnt",
224 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
225 {"tx_oq_drop_pkt_cnt",
226 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
227 {"nic_l2_err_drop_pkt_cnt",
228 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
229 {"roc_l2_err_drop_pkt_cnt",
230 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
231 };
232
233 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
234 {"mac_tx_mac_pause_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
236 {"mac_rx_mac_pause_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
238 {"mac_tx_pfc_pri0_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
240 {"mac_tx_pfc_pri1_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
242 {"mac_tx_pfc_pri2_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
244 {"mac_tx_pfc_pri3_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
246 {"mac_tx_pfc_pri4_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
248 {"mac_tx_pfc_pri5_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
250 {"mac_tx_pfc_pri6_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
252 {"mac_tx_pfc_pri7_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
254 {"mac_rx_pfc_pri0_pkt_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
256 {"mac_rx_pfc_pri1_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
258 {"mac_rx_pfc_pri2_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
260 {"mac_rx_pfc_pri3_pkt_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
262 {"mac_rx_pfc_pri4_pkt_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
264 {"mac_rx_pfc_pri5_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
266 {"mac_rx_pfc_pri6_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
268 {"mac_rx_pfc_pri7_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
270 {"mac_tx_total_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
272 {"mac_tx_total_oct_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
274 {"mac_tx_good_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
276 {"mac_tx_bad_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
278 {"mac_tx_good_oct_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
280 {"mac_tx_bad_oct_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
282 {"mac_tx_uni_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
284 {"mac_tx_multi_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
286 {"mac_tx_broad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
288 {"mac_tx_undersize_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
290 {"mac_tx_overrsize_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
292 {"mac_tx_64_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
294 {"mac_tx_65_127_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
296 {"mac_tx_128_255_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
298 {"mac_tx_256_511_oct_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
300 {"mac_tx_512_1023_oct_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
302 {"mac_tx_1024_1518_oct_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
304 {"mac_tx_1519_max_oct_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
306 {"mac_rx_total_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
308 {"mac_rx_total_oct_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
310 {"mac_rx_good_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
312 {"mac_rx_bad_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
314 {"mac_rx_good_oct_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
316 {"mac_rx_bad_oct_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
318 {"mac_rx_uni_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
320 {"mac_rx_multi_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
322 {"mac_rx_broad_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
324 {"mac_rx_undersize_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
326 {"mac_rx_overrsize_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
328 {"mac_rx_64_oct_pkt_num",
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
330 {"mac_rx_65_127_oct_pkt_num",
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
332 {"mac_rx_128_255_oct_pkt_num",
333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
334 {"mac_rx_256_511_oct_pkt_num",
335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
336 {"mac_rx_512_1023_oct_pkt_num",
337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
338 {"mac_rx_1024_1518_oct_pkt_num",
339 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
340 {"mac_rx_1519_max_oct_pkt_num",
341 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
342
343 {"mac_trans_fragment_pkt_num",
344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
345 {"mac_trans_undermin_pkt_num",
346 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
347 {"mac_trans_jabber_pkt_num",
348 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
349 {"mac_trans_err_all_pkt_num",
350 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
351 {"mac_trans_from_app_good_pkt_num",
352 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
353 {"mac_trans_from_app_bad_pkt_num",
354 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
355 {"mac_rcv_fragment_pkt_num",
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
357 {"mac_rcv_undermin_pkt_num",
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
359 {"mac_rcv_jabber_pkt_num",
360 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
361 {"mac_rcv_fcs_err_pkt_num",
362 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
363 {"mac_rcv_send_app_good_pkt_num",
364 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
365 {"mac_rcv_send_app_bad_pkt_num",
366 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
367 };
368
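/* Accumulate the 64-bit IGU/EGU/SSU counters from a chain of
 * HCLGE_64_BIT_CMD_NUM command descriptors. In the multi-descriptor
 * response only the first descriptor reserves its 8-byte command
 * header, so it carries one fewer 64-bit word than the descriptors
 * that follow, which are consumed in full (see the pointer
 * arithmetic below).
 */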
369 static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
370 {
371 #define HCLGE_64_BIT_CMD_NUM 5
372 #define HCLGE_64_BIT_RTN_DATANUM 4
373 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
374 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
375 u64 *desc_data;
376 int i, k, n;
377 int ret;
378
379 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
380 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
381 if (ret) {
382 dev_err(&hdev->pdev->dev,
383 "Get 64 bit pkt stats fail, status = %d.\n", ret);
384 return ret;
385 }
386
387 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
388 if (unlikely(i == 0)) {
389 desc_data = (u64 *)(&desc[i].data[0]);
390 n = HCLGE_64_BIT_RTN_DATANUM - 1;
391 } else {
392 desc_data = (u64 *)(&desc[i]);
393 n = HCLGE_64_BIT_RTN_DATANUM;
394 }
395 for (k = 0; k < n; k++) {
396 *data++ += cpu_to_le64(*desc_data);
397 desc_data++;
398 }
399 }
400
401 return 0;
402 }
403
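/* The pkt_curr_buf* counters report current buffer occupancy rather
 * than monotonically increasing event counts, which is presumably why
 * they are zeroed before each refresh instead of being accumulated.
 */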
404 static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
405 {
406 stats->pkt_curr_buf_cnt = 0;
407 stats->pkt_curr_buf_tc0_cnt = 0;
408 stats->pkt_curr_buf_tc1_cnt = 0;
409 stats->pkt_curr_buf_tc2_cnt = 0;
410 stats->pkt_curr_buf_tc3_cnt = 0;
411 stats->pkt_curr_buf_tc4_cnt = 0;
412 stats->pkt_curr_buf_tc5_cnt = 0;
413 stats->pkt_curr_buf_tc6_cnt = 0;
414 stats->pkt_curr_buf_tc7_cnt = 0;
415 }
416
417 static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
418 {
419 #define HCLGE_32_BIT_CMD_NUM 8
420 #define HCLGE_32_BIT_RTN_DATANUM 8
421
422 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
423 struct hclge_32_bit_stats *all_32_bit_stats;
424 u32 *desc_data;
425 int i, k, n;
426 u64 *data;
427 int ret;
428
429 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
430 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
431
432 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
433 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
434 if (ret) {
435 dev_err(&hdev->pdev->dev,
436 "Get 32 bit pkt stats fail, status = %d.\n", ret);
437
438 return ret;
439 }
440
441 hclge_reset_partial_32bit_counter(all_32_bit_stats);
442 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
443 if (unlikely(i == 0)) {
444 all_32_bit_stats->igu_rx_err_pkt +=
445 cpu_to_le32(desc[i].data[0]);
446 all_32_bit_stats->igu_rx_no_eof_pkt +=
447 cpu_to_le32(desc[i].data[1] & 0xffff);
448 all_32_bit_stats->igu_rx_no_sof_pkt +=
449 cpu_to_le32((desc[i].data[1] >> 16) & 0xffff);
450
451 desc_data = (u32 *)(&desc[i].data[2]);
452 n = HCLGE_32_BIT_RTN_DATANUM - 4;
453 } else {
454 desc_data = (u32 *)(&desc[i]);
455 n = HCLGE_32_BIT_RTN_DATANUM;
456 }
457 for (k = 0; k < n; k++) {
458 *data++ += cpu_to_le32(*desc_data);
459 desc_data++;
460 }
461 }
462
463 return 0;
464 }
465
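/* Accumulate the MAC counters from HCLGE_MAC_CMD_NUM chained
 * descriptors. As with the 64-bit stats above, descriptors after the
 * first are consumed whole; here only two 64-bit words of the first
 * descriptor carry statistics (n = HCLGE_RTN_DATA_NUM - 2).
 */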
466 static int hclge_mac_update_stats(struct hclge_dev *hdev)
467 {
468 #define HCLGE_MAC_CMD_NUM 17
469 #define HCLGE_RTN_DATA_NUM 4
470
471 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
472 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
473 u64 *desc_data;
474 int i, k, n;
475 int ret;
476
477 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
478 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
479 if (ret) {
480 dev_err(&hdev->pdev->dev,
481 "Get MAC pkt stats fail, status = %d.\n", ret);
482
483 return ret;
484 }
485
486 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
487 if (unlikely(i == 0)) {
488 desc_data = (u64 *)(&desc[i].data[0]);
489 n = HCLGE_RTN_DATA_NUM - 2;
490 } else {
491 desc_data = (u64 *)(&desc[i]);
492 n = HCLGE_RTN_DATA_NUM;
493 }
494 for (k = 0; k < n; k++) {
495 *data++ += cpu_to_le64(*desc_data);
496 desc_data++;
497 }
498 }
499
500 return 0;
501 }
502
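/* Refresh the per-queue RX/TX packet counters: one query descriptor is
 * sent per TQP, with the queue index encoded in the low 9 bits of
 * data[0] (the 0x1ff mask) and the result read back from data[4].
 */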
503 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
504 {
505 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
506 struct hclge_vport *vport = hclge_get_vport(handle);
507 struct hclge_dev *hdev = vport->back;
508 struct hnae3_queue *queue;
509 struct hclge_desc desc[1];
510 struct hclge_tqp *tqp;
511 int ret, i;
512
513 for (i = 0; i < kinfo->num_tqps; i++) {
514 queue = handle->kinfo.tqp[i];
515 tqp = container_of(queue, struct hclge_tqp, q);
516 /* command : HCLGE_OPC_QUERY_RX_STATUS */
517 hclge_cmd_setup_basic_desc(&desc[0],
518 HCLGE_OPC_QUERY_RX_STATUS,
519 true);
520
521 desc[0].data[0] = (tqp->index & 0x1ff);
522 ret = hclge_cmd_send(&hdev->hw, desc, 1);
523 if (ret) {
524 dev_err(&hdev->pdev->dev,
525 "Query tqp stat fail, status = %d,queue = %d\n",
526 ret, i);
527 return ret;
528 }
529 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
530 cpu_to_le32(desc[0].data[4]);
531 }
532
533 for (i = 0; i < kinfo->num_tqps; i++) {
534 queue = handle->kinfo.tqp[i];
535 tqp = container_of(queue, struct hclge_tqp, q);
536 /* command : HCLGE_OPC_QUERY_TX_STATUS */
537 hclge_cmd_setup_basic_desc(&desc[0],
538 HCLGE_OPC_QUERY_TX_STATUS,
539 true);
540
541 desc[0].data[0] = (tqp->index & 0x1ff);
542 ret = hclge_cmd_send(&hdev->hw, desc, 1);
543 if (ret) {
544 dev_err(&hdev->pdev->dev,
545 "Query tqp stat fail, status = %d,queue = %d\n",
546 ret, i);
547 return ret;
548 }
549 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
550 cpu_to_le32(desc[0].data[4]);
551 }
552
553 return 0;
554 }
555
556 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
557 {
558 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
559 struct hclge_tqp *tqp;
560 u64 *buff = data;
561 int i;
562
563 for (i = 0; i < kinfo->num_tqps; i++) {
564 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
565 *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd);
566 }
567
568 for (i = 0; i < kinfo->num_tqps; i++) {
569 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
570 *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd);
571 }
572
573 return buff;
574 }
575
576 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
577 {
578 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
579
580 return kinfo->num_tqps * 2;
581 }
582
583 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
584 {
585 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
586 u8 *buff = data;
587 int i = 0;
588
589 for (i = 0; i < kinfo->num_tqps; i++) {
590 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
591 struct hclge_tqp, q);
592 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
593 tqp->index);
594 buff = buff + ETH_GSTRING_LEN;
595 }
596
597 for (i = 0; i < kinfo->num_tqps; i++) {
598 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
599 struct hclge_tqp, q);
600 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
601 tqp->index);
602 buff = buff + ETH_GSTRING_LEN;
603 }
604
605 return buff;
606 }
607
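/* Generic helper: copy each counter out of a stats structure using the
 * field offsets recorded in one of the string/offset tables above, and
 * return the advanced output pointer.
 */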
608 static u64 *hclge_comm_get_stats(void *comm_stats,
609 const struct hclge_comm_stats_str strs[],
610 int size, u64 *data)
611 {
612 u64 *buf = data;
613 u32 i;
614
615 for (i = 0; i < size; i++)
616 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
617
618 return buf + size;
619 }
620
621 static u8 *hclge_comm_get_strings(u32 stringset,
622 const struct hclge_comm_stats_str strs[],
623 int size, u8 *data)
624 {
625 char *buff = (char *)data;
626 u32 i;
627
628 if (stringset != ETH_SS_STATS)
629 return buff;
630
631 for (i = 0; i < size; i++) {
632 snprintf(buff, ETH_GSTRING_LEN, "%s",
633 strs[i].desc);
634 buff = buff + ETH_GSTRING_LEN;
635 }
636
637 return (u8 *)buff;
638 }
639
640 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
641 struct net_device_stats *net_stats)
642 {
643 net_stats->tx_dropped = 0;
644 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
645 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
646 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
647
648 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
649 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
650 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
651 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
652 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
653 net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
654
655 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
656 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
657
658 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
659 net_stats->rx_length_errors =
660 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
661 net_stats->rx_length_errors +=
662 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
663 net_stats->rx_over_errors =
664 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
665 }
666
667 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
668 {
669 struct hnae3_handle *handle;
670 int status;
671
672 handle = &hdev->vport[0].nic;
673 if (handle->client) {
674 status = hclge_tqps_update_stats(handle);
675 if (status) {
676 dev_err(&hdev->pdev->dev,
677 "Update TQPS stats fail, status = %d.\n",
678 status);
679 }
680 }
681
682 status = hclge_mac_update_stats(hdev);
683 if (status)
684 dev_err(&hdev->pdev->dev,
685 "Update MAC stats fail, status = %d.\n", status);
686
687 status = hclge_32_bit_update_stats(hdev);
688 if (status)
689 dev_err(&hdev->pdev->dev,
690 "Update 32 bit stats fail, status = %d.\n",
691 status);
692
693 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697 struct net_device_stats *net_stats)
698 {
699 struct hclge_vport *vport = hclge_get_vport(handle);
700 struct hclge_dev *hdev = vport->back;
701 struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
702 int status;
703
704 status = hclge_mac_update_stats(hdev);
705 if (status)
706 dev_err(&hdev->pdev->dev,
707 "Update MAC stats fail, status = %d.\n",
708 status);
709
710 status = hclge_32_bit_update_stats(hdev);
711 if (status)
712 dev_err(&hdev->pdev->dev,
713 "Update 32 bit stats fail, status = %d.\n",
714 status);
715
716 status = hclge_64_bit_update_stats(hdev);
717 if (status)
718 dev_err(&hdev->pdev->dev,
719 "Update 64 bit stats fail, status = %d.\n",
720 status);
721
722 status = hclge_tqps_update_stats(handle);
723 if (status)
724 dev_err(&hdev->pdev->dev,
725 "Update TQPS stats fail, status = %d.\n",
726 status);
727
728 hclge_update_netstat(hw_stats, net_stats);
729 }
730
731 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
732 {
733 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7
734
735 struct hclge_vport *vport = hclge_get_vport(handle);
736 struct hclge_dev *hdev = vport->back;
737 int count = 0;
738
739 /* Loopback test support rules:
740 * mac: supported in GE mode only
741 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
742 * phy: supported only when a PHY device exists on the board
743 */
744 if (stringset == ETH_SS_TEST) {
745 /* clear loopback bit flags at first */
746 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
747 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
748 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
749 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
750 count += 1;
751 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
752 } else {
753 count = -EOPNOTSUPP;
754 }
755 } else if (stringset == ETH_SS_STATS) {
756 count = ARRAY_SIZE(g_mac_stats_string) +
757 ARRAY_SIZE(g_all_32bit_stats_string) +
758 ARRAY_SIZE(g_all_64bit_stats_string) +
759 hclge_tqps_get_sset_count(handle, stringset);
760 }
761
762 return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle,
766 u32 stringset,
767 u8 *data)
768 {
769 u8 *p = data;
770 int size;
771
772 if (stringset == ETH_SS_STATS) {
773 size = ARRAY_SIZE(g_mac_stats_string);
774 p = hclge_comm_get_strings(stringset,
775 g_mac_stats_string,
776 size,
777 p);
778 size = ARRAY_SIZE(g_all_32bit_stats_string);
779 p = hclge_comm_get_strings(stringset,
780 g_all_32bit_stats_string,
781 size,
782 p);
783 size = ARRAY_SIZE(g_all_64bit_stats_string);
784 p = hclge_comm_get_strings(stringset,
785 g_all_64bit_stats_string,
786 size,
787 p);
788 p = hclge_tqps_get_strings(handle, p);
789 } else if (stringset == ETH_SS_TEST) {
790 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
791 memcpy(p,
792 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
793 ETH_GSTRING_LEN);
794 p += ETH_GSTRING_LEN;
795 }
796 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
797 memcpy(p,
798 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
799 ETH_GSTRING_LEN);
800 p += ETH_GSTRING_LEN;
801 }
802 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
803 memcpy(p,
804 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
805 ETH_GSTRING_LEN);
806 p += ETH_GSTRING_LEN;
807 }
808 }
809 }
810
811 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
812 {
813 struct hclge_vport *vport = hclge_get_vport(handle);
814 struct hclge_dev *hdev = vport->back;
815 u64 *p;
816
817 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
818 g_mac_stats_string,
819 ARRAY_SIZE(g_mac_stats_string),
820 data);
821 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
822 g_all_32bit_stats_string,
823 ARRAY_SIZE(g_all_32bit_stats_string),
824 p);
825 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
826 g_all_64bit_stats_string,
827 ARRAY_SIZE(g_all_64bit_stats_string),
828 p);
829 p = hclge_tqps_get_stats(handle, p);
830 }
831
832 static int hclge_parse_func_status(struct hclge_dev *hdev,
833 struct hclge_func_status *status)
834 {
835 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
836 return -EINVAL;
837
838 /* Set or clear the main-PF flag */
839 if (status->pf_state & HCLGE_PF_STATE_MAIN)
840 hdev->flag |= HCLGE_FLAG_MAIN;
841 else
842 hdev->flag &= ~HCLGE_FLAG_MAIN;
843
844 hdev->num_req_vfs = status->vf_num / status->pf_num;
845 return 0;
846 }
847
848 static int hclge_query_function_status(struct hclge_dev *hdev)
849 {
850 struct hclge_func_status *req;
851 struct hclge_desc desc;
852 int timeout = 0;
853 int ret;
854
855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856 req = (struct hclge_func_status *)desc.data;
857
858 do {
859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 if (ret) {
861 dev_err(&hdev->pdev->dev,
862 "query function status failed %d.\n",
863 ret);
864
865 return ret;
866 }
867
868 /* Check if PF reset is done */
869 if (req->pf_state)
870 break;
871 usleep_range(1000, 2000);
872 } while (timeout++ < 5);
873
874 ret = hclge_parse_func_status(hdev, req);
875
876 return ret;
877 }
878
879 static int hclge_query_pf_resource(struct hclge_dev *hdev)
880 {
881 struct hclge_pf_res *req;
882 struct hclge_desc desc;
883 int ret;
884
885 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
886 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
887 if (ret) {
888 dev_err(&hdev->pdev->dev,
889 "query pf resource failed %d.\n", ret);
890 return ret;
891 }
892
893 req = (struct hclge_pf_res *)desc.data;
894 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
895 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
896
897 if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
898 hdev->num_roce_msix =
899 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
900 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
901
902 /* PF should have NIC vectors and RoCE vectors;
903 * NIC vectors are queued before RoCE vectors.
904 */
905 hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
906 } else {
907 hdev->num_msi =
908 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
909 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
910 }
911
912 return 0;
913 }
914
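/* Map the firmware speed code to a driver speed value. The encoding
 * mirrors the one used by hclge_cfg_mac_speed_dup() further below
 * (e.g. 0 -> 1G, 5 -> 100G, 6 -> 10M, 7 -> 100M).
 */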
915 static int hclge_parse_speed(int speed_cmd, int *speed)
916 {
917 switch (speed_cmd) {
918 case 6:
919 *speed = HCLGE_MAC_SPEED_10M;
920 break;
921 case 7:
922 *speed = HCLGE_MAC_SPEED_100M;
923 break;
924 case 0:
925 *speed = HCLGE_MAC_SPEED_1G;
926 break;
927 case 1:
928 *speed = HCLGE_MAC_SPEED_10G;
929 break;
930 case 2:
931 *speed = HCLGE_MAC_SPEED_25G;
932 break;
933 case 3:
934 *speed = HCLGE_MAC_SPEED_40G;
935 break;
936 case 4:
937 *speed = HCLGE_MAC_SPEED_50G;
938 break;
939 case 5:
940 *speed = HCLGE_MAC_SPEED_100G;
941 break;
942 default:
943 return -EINVAL;
944 }
945
946 return 0;
947 }
948
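/* Unpack the configuration parameters read from flash. Note how the
 * MAC address is reassembled: the low 32 bits come from param[2] and
 * the high 16 bits from param[3]; the high part is shifted up in two
 * steps ((x << 31) << 1), apparently as a defensive pattern against a
 * single 32-bit shift.
 */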
949 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
950 {
951 struct hclge_cfg_param *req;
952 u64 mac_addr_tmp_high;
953 u64 mac_addr_tmp;
954 int i;
955
956 req = (struct hclge_cfg_param *)desc[0].data;
957
958 /* get the configuration */
959 cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
960 HCLGE_CFG_VMDQ_M,
961 HCLGE_CFG_VMDQ_S);
962 cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
963 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
964 cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
965 HCLGE_CFG_TQP_DESC_N_M,
966 HCLGE_CFG_TQP_DESC_N_S);
967
968 cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
969 HCLGE_CFG_PHY_ADDR_M,
970 HCLGE_CFG_PHY_ADDR_S);
971 cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
972 HCLGE_CFG_MEDIA_TP_M,
973 HCLGE_CFG_MEDIA_TP_S);
974 cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
975 HCLGE_CFG_RX_BUF_LEN_M,
976 HCLGE_CFG_RX_BUF_LEN_S);
977 /* get mac_address */
978 mac_addr_tmp = __le32_to_cpu(req->param[2]);
979 mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
980 HCLGE_CFG_MAC_ADDR_H_M,
981 HCLGE_CFG_MAC_ADDR_H_S);
982
983 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
984
985 cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
986 HCLGE_CFG_DEFAULT_SPEED_M,
987 HCLGE_CFG_DEFAULT_SPEED_S);
988 for (i = 0; i < ETH_ALEN; i++)
989 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
990
991 req = (struct hclge_cfg_param *)desc[1].data;
992 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
993 }
994
995 /* hclge_get_cfg: query static parameters from flash
996 * @hdev: pointer to struct hclge_dev
997 * @hcfg: the config structure to be filled
998 */
999 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1000 {
1001 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1002 struct hclge_cfg_param *req;
1003 int i, ret;
1004
1005 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1006 req = (struct hclge_cfg_param *)desc[i].data;
1007 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1008 true);
1009 hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M,
1010 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1011 /* Length must be in units of 4 bytes when sent to hardware */
1012 hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M,
1013 HCLGE_CFG_RD_LEN_S,
1014 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1015 req->offset = cpu_to_le32(req->offset);
1016 }
1017
1018 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1019 if (ret) {
1020 dev_err(&hdev->pdev->dev,
1021 "get config failed %d.\n", ret);
1022 return ret;
1023 }
1024
1025 hclge_parse_cfg(hcfg, desc);
1026 return 0;
1027 }
1028
1029 static int hclge_get_cap(struct hclge_dev *hdev)
1030 {
1031 int ret;
1032
1033 ret = hclge_query_function_status(hdev);
1034 if (ret) {
1035 dev_err(&hdev->pdev->dev,
1036 "query function status error %d.\n", ret);
1037 return ret;
1038 }
1039
1040 /* get pf resource */
1041 ret = hclge_query_pf_resource(hdev);
1042 if (ret) {
1043 dev_err(&hdev->pdev->dev,
1044 "query pf resource error %d.\n", ret);
1045 return ret;
1046 }
1047
1048 return 0;
1049 }
1050
1051 static int hclge_configure(struct hclge_dev *hdev)
1052 {
1053 struct hclge_cfg cfg;
1054 int ret, i;
1055
1056 ret = hclge_get_cfg(hdev, &cfg);
1057 if (ret) {
1058 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1059 return ret;
1060 }
1061
1062 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1063 hdev->base_tqp_pid = 0;
1064 hdev->rss_size_max = 1;
1065 hdev->rx_buf_len = cfg.rx_buf_len;
1066 for (i = 0; i < ETH_ALEN; i++)
1067 hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
1068 hdev->hw.mac.media_type = cfg.media_type;
1069 hdev->num_desc = cfg.tqp_desc_num;
1070 hdev->tm_info.num_pg = 1;
1071 hdev->tm_info.num_tc = cfg.tc_num;
1072 hdev->tm_info.hw_pfc_map = 0;
1073
1074 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1075 if (ret) {
1076 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1077 return ret;
1078 }
1079
1080 if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) ||
1081 (hdev->tm_info.num_tc < 1)) {
1082 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1083 hdev->tm_info.num_tc);
1084 hdev->tm_info.num_tc = 1;
1085 }
1086
1087 /* Non-contiguous TCs are currently not supported */
1088 for (i = 0; i < cfg.tc_num; i++)
1089 hnae_set_bit(hdev->hw_tc_map, i, 1);
1090
1091 if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
1092 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1093 else
1094 hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
1095
1096 return ret;
1097 }
1098
1099 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1100 int tso_mss_max)
1101 {
1102 struct hclge_cfg_tso_status *req;
1103 struct hclge_desc desc;
1104
1105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1106
1107 req = (struct hclge_cfg_tso_status *)desc.data;
1108 hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M,
1109 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1110 hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MAX_M,
1111 HCLGE_TSO_MSS_MAX_S, tso_mss_max);
1112
1113 return hclge_cmd_send(&hdev->hw, &desc, 1);
1114 }
1115
1116 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1117 {
1118 struct hclge_tqp *tqp;
1119 int i;
1120
1121 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1122 sizeof(struct hclge_tqp), GFP_KERNEL);
1123 if (!hdev->htqp)
1124 return -ENOMEM;
1125
1126 tqp = hdev->htqp;
1127
1128 for (i = 0; i < hdev->num_tqps; i++) {
1129 tqp->dev = &hdev->pdev->dev;
1130 tqp->index = i;
1131
1132 tqp->q.ae_algo = &ae_algo;
1133 tqp->q.buf_size = hdev->rx_buf_len;
1134 tqp->q.desc_num = hdev->num_desc;
1135 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1136 i * HCLGE_TQP_REG_SIZE;
1137
1138 tqp++;
1139 }
1140
1141 return 0;
1142 }
1143
1144 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1145 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1146 {
1147 struct hclge_tqp_map *req;
1148 struct hclge_desc desc;
1149 int ret;
1150
1151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1152
1153 req = (struct hclge_tqp_map *)desc.data;
1154 req->tqp_id = cpu_to_le16(tqp_pid);
1155 req->tqp_vf = cpu_to_le16(func_id);
1156 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1157 1 << HCLGE_TQP_MAP_EN_B;
1158 req->tqp_vid = cpu_to_le16(tqp_vid);
1159
1160 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1161 if (ret) {
1162 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
1163 ret);
1164 return ret;
1165 }
1166
1167 return 0;
1168 }
1169
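/* First-fit TQP assignment: walk the device's TQP pool, claim up to
 * num_tqps unallocated queues for this vport, and map each one to the
 * function via hclge_map_tqps_to_func().
 */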
1170 static int hclge_assign_tqp(struct hclge_vport *vport,
1171 struct hnae3_queue **tqp, u16 num_tqps)
1172 {
1173 struct hclge_dev *hdev = vport->back;
1174 int i, alloced, func_id, ret;
1175 bool is_pf;
1176
1177 func_id = vport->vport_id;
1178 is_pf = (vport->vport_id == 0);
1179
1180 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1181 alloced < num_tqps; i++) {
1182 if (!hdev->htqp[i].alloced) {
1183 hdev->htqp[i].q.handle = &vport->nic;
1184 hdev->htqp[i].q.tqp_index = alloced;
1185 tqp[alloced] = &hdev->htqp[i].q;
1186 hdev->htqp[i].alloced = true;
1187 ret = hclge_map_tqps_to_func(hdev, func_id,
1188 hdev->htqp[i].index,
1189 alloced, is_pf);
1190 if (ret)
1191 return ret;
1192
1193 alloced++;
1194 }
1195 }
1196 vport->alloc_tqps = num_tqps;
1197
1198 return 0;
1199 }
1200
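/* Derive the vport's queue layout: rss_size = min(rss_size_max,
 * num_tqps / num_tc), and the number of TQPs actually used is
 * rss_size * num_tc, so every enabled TC gets an equal slice.
 */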
1201 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
1202 {
1203 struct hnae3_handle *nic = &vport->nic;
1204 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1205 struct hclge_dev *hdev = vport->back;
1206 int i, ret;
1207
1208 kinfo->num_desc = hdev->num_desc;
1209 kinfo->rx_buf_len = hdev->rx_buf_len;
1210 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1211 kinfo->rss_size
1212 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1213 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1214
1215 for (i = 0; i < HNAE3_MAX_TC; i++) {
1216 if (hdev->hw_tc_map & BIT(i)) {
1217 kinfo->tc_info[i].enable = true;
1218 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1219 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1220 kinfo->tc_info[i].tc = i;
1221 } else {
1222 /* Set to default queue if TC is disabled */
1223 kinfo->tc_info[i].enable = false;
1224 kinfo->tc_info[i].tqp_offset = 0;
1225 kinfo->tc_info[i].tqp_count = 1;
1226 kinfo->tc_info[i].tc = 0;
1227 }
1228 }
1229
1230 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1231 sizeof(struct hnae3_queue *), GFP_KERNEL);
1232 if (!kinfo->tqp)
1233 return -ENOMEM;
1234
1235 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
1236 if (ret) {
1237 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1238 return -EINVAL;
1239 }
1240
1241 return 0;
1242 }
1243
1244 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1245 {
1246 /* this will be initialized later */
1247 }
1248
1249 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1250 {
1251 struct hnae3_handle *nic = &vport->nic;
1252 struct hclge_dev *hdev = vport->back;
1253 int ret;
1254
1255 nic->pdev = hdev->pdev;
1256 nic->ae_algo = &ae_algo;
1257 nic->numa_node_mask = hdev->numa_node_mask;
1258
1259 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1260 ret = hclge_knic_setup(vport, num_tqps);
1261 if (ret) {
1262 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1263 ret);
1264 return ret;
1265 }
1266 } else {
1267 hclge_unic_setup(vport, num_tqps);
1268 }
1269
1270 return 0;
1271 }
1272
1273 static int hclge_alloc_vport(struct hclge_dev *hdev)
1274 {
1275 struct pci_dev *pdev = hdev->pdev;
1276 struct hclge_vport *vport;
1277 u32 tqp_main_vport;
1278 u32 tqp_per_vport;
1279 int num_vport, i;
1280 int ret;
1281
1282 /* We need to alloc a vport for the main NIC of the PF */
1283 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1284
1285 if (hdev->num_tqps < num_vport)
1286 num_vport = hdev->num_tqps;
1287
1288 /* Alloc the same number of TQPs for every vport */
1289 tqp_per_vport = hdev->num_tqps / num_vport;
1290 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1291
1292 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1293 GFP_KERNEL);
1294 if (!vport)
1295 return -ENOMEM;
1296
1297 hdev->vport = vport;
1298 hdev->num_alloc_vport = num_vport;
1299
1300 #ifdef CONFIG_PCI_IOV
1301 /* Enable SRIOV */
1302 if (hdev->num_req_vfs) {
1303 dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
1304 hdev->num_req_vfs);
1305 ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
1306 if (ret) {
1307 hdev->num_alloc_vfs = 0;
1308 dev_err(&pdev->dev, "SRIOV enable failed %d\n",
1309 ret);
1310 return ret;
1311 }
1312 }
1313 hdev->num_alloc_vfs = hdev->num_req_vfs;
1314 #endif
1315
1316 for (i = 0; i < num_vport; i++) {
1317 vport->back = hdev;
1318 vport->vport_id = i;
1319
1320 if (i == 0)
1321 ret = hclge_vport_setup(vport, tqp_main_vport);
1322 else
1323 ret = hclge_vport_setup(vport, tqp_per_vport);
1324 if (ret) {
1325 dev_err(&pdev->dev,
1326 "vport setup failed for vport %d, %d\n",
1327 i, ret);
1328 return ret;
1329 }
1330
1331 vport++;
1332 }
1333
1334 return 0;
1335 }
1336
1337 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size)
1338 {
1339 /* TX buffer size is in units of 128 bytes */
1340 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1341 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1342 struct hclge_tx_buff_alloc *req;
1343 struct hclge_desc desc;
1344 int ret;
1345 u8 i;
1346
1347 req = (struct hclge_tx_buff_alloc *)desc.data;
1348
1349 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1350 for (i = 0; i < HCLGE_TC_NUM; i++)
1351 req->tx_pkt_buff[i] =
1352 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1353 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1354
1355 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1356 if (ret) {
1357 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1358 ret);
1359 return ret;
1360 }
1361
1362 return 0;
1363 }
1364
1365 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size)
1366 {
1367 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size);
1368
1369 if (ret) {
1370 dev_err(&hdev->pdev->dev,
1371 "tx buffer alloc failed %d\n", ret);
1372 return ret;
1373 }
1374
1375 return 0;
1376 }
1377
1378 static int hclge_get_tc_num(struct hclge_dev *hdev)
1379 {
1380 int i, cnt = 0;
1381
1382 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1383 if (hdev->hw_tc_map & BIT(i))
1384 cnt++;
1385 return cnt;
1386 }
1387
1388 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1389 {
1390 int i, cnt = 0;
1391
1392 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1393 if (hdev->hw_tc_map & BIT(i) &&
1394 hdev->tm_info.hw_pfc_map & BIT(i))
1395 cnt++;
1396 return cnt;
1397 }
1398
1399 /* Get the number of pfc enabled TCs, which have private buffer */
1400 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev)
1401 {
1402 struct hclge_priv_buf *priv;
1403 int i, cnt = 0;
1404
1405 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1406 priv = &hdev->priv_buf[i];
1407 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1408 priv->enable)
1409 cnt++;
1410 }
1411
1412 return cnt;
1413 }
1414
1415 /* Get the number of pfc disabled TCs, which have private buffer */
1416 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev)
1417 {
1418 struct hclge_priv_buf *priv;
1419 int i, cnt = 0;
1420
1421 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1422 priv = &hdev->priv_buf[i];
1423 if (hdev->hw_tc_map & BIT(i) &&
1424 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1425 priv->enable)
1426 cnt++;
1427 }
1428
1429 return cnt;
1430 }
1431
1432 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev)
1433 {
1434 struct hclge_priv_buf *priv;
1435 u32 rx_priv = 0;
1436 int i;
1437
1438 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1439 priv = &hdev->priv_buf[i];
1440 if (priv->enable)
1441 rx_priv += priv->buf_size;
1442 }
1443 return rx_priv;
1444 }
1445
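/* Check whether the remaining packet buffer can hold the shared
 * buffer: the shared standard is the larger of (2 * MPS + DV) and
 * (MPS per PFC-enabled TC + MPS/2 per other TC + one extra MPS).
 * On success, also program the shared-buffer watermarks per TC.
 */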
1446 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
1447 {
1448 u32 shared_buf_min, shared_buf_tc, shared_std;
1449 int tc_num, pfc_enable_num;
1450 u32 shared_buf;
1451 u32 rx_priv;
1452 int i;
1453
1454 tc_num = hclge_get_tc_num(hdev);
1455 pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1456
1457 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1458 shared_buf_tc = pfc_enable_num * hdev->mps +
1459 (tc_num - pfc_enable_num) * hdev->mps / 2 +
1460 hdev->mps;
1461 shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1462
1463 rx_priv = hclge_get_rx_priv_buff_alloced(hdev);
1464 if (rx_all <= rx_priv + shared_std)
1465 return false;
1466
1467 shared_buf = rx_all - rx_priv;
1468 hdev->s_buf.buf_size = shared_buf;
1469 hdev->s_buf.self.high = shared_buf;
1470 hdev->s_buf.self.low = 2 * hdev->mps;
1471
1472 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1473 if ((hdev->hw_tc_map & BIT(i)) &&
1474 (hdev->tm_info.hw_pfc_map & BIT(i))) {
1475 hdev->s_buf.tc_thrd[i].low = hdev->mps;
1476 hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1477 } else {
1478 hdev->s_buf.tc_thrd[i].low = 0;
1479 hdev->s_buf.tc_thrd[i].high = hdev->mps;
1480 }
1481 }
1482
1483 return true;
1484 }
1485
1486 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1487 * @hdev: pointer to struct hclge_dev
1488 * @tx_size: the allocated tx buffer for all TCs
1489 * @return: 0 on success, a negative error code on failure
1490 */
1491 int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
1492 {
1493 u32 rx_all = hdev->pkt_buf_size - tx_size;
1494 int no_pfc_priv_num, pfc_priv_num;
1495 struct hclge_priv_buf *priv;
1496 int i;
1497
1498 /* step 1, try to alloc a private buffer for each enabled TC */
1499 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1500 priv = &hdev->priv_buf[i];
1501 if (hdev->hw_tc_map & BIT(i)) {
1502 priv->enable = 1;
1503 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1504 priv->wl.low = hdev->mps;
1505 priv->wl.high = priv->wl.low + hdev->mps;
1506 priv->buf_size = priv->wl.high +
1507 HCLGE_DEFAULT_DV;
1508 } else {
1509 priv->wl.low = 0;
1510 priv->wl.high = 2 * hdev->mps;
1511 priv->buf_size = priv->wl.high;
1512 }
1513 }
1514 }
1515
1516 if (hclge_is_rx_buf_ok(hdev, rx_all))
1517 return 0;
1518
1519 /* step 2, try to decrease the buffer size of
1520 * the PFC-disabled TCs' private buffers
1521 */
1522 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1523 priv = &hdev->priv_buf[i];
1524
1525 if (hdev->hw_tc_map & BIT(i))
1526 priv->enable = 1;
1527
1528 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1529 priv->wl.low = 128;
1530 priv->wl.high = priv->wl.low + hdev->mps;
1531 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1532 } else {
1533 priv->wl.low = 0;
1534 priv->wl.high = hdev->mps;
1535 priv->buf_size = priv->wl.high;
1536 }
1537 }
1538
1539 if (hclge_is_rx_buf_ok(hdev, rx_all))
1540 return 0;
1541
1542 /* step 3, try to reduce the number of pfc disabled TCs,
1543 * which have private buffer
1544 */
1545 /* get the number of PFC-disabled TCs that have a private buffer */
1546 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev);
1547
1548 /* clear the highest-numbered TCs first */
1549 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1550 priv = &hdev->priv_buf[i];
1551
1552 if (hdev->hw_tc_map & BIT(i) &&
1553 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1554 /* Clear the no pfc TC private buffer */
1555 priv->wl.low = 0;
1556 priv->wl.high = 0;
1557 priv->buf_size = 0;
1558 priv->enable = 0;
1559 no_pfc_priv_num--;
1560 }
1561
1562 if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1563 no_pfc_priv_num == 0)
1564 break;
1565 }
1566
1567 if (hclge_is_rx_buf_ok(hdev, rx_all))
1568 return 0;
1569
1570 /* step 4, try to reduce the number of pfc enabled TCs
1571 * which have private buffer.
1572 */
1573 pfc_priv_num = hclge_get_pfc_priv_num(hdev);
1574
1575 /* clear the highest-numbered TCs first */
1576 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1577 priv = &hdev->priv_buf[i];
1578
1579 if (hdev->hw_tc_map & BIT(i) &&
1580 hdev->tm_info.hw_pfc_map & BIT(i)) {
1581 /* Reduce the number of pfc TC with private buffer */
1582 priv->wl.low = 0;
1583 priv->enable = 0;
1584 priv->wl.high = 0;
1585 priv->buf_size = 0;
1586 pfc_priv_num--;
1587 }
1588
1589 if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1590 pfc_priv_num == 0)
1591 break;
1592 }
1593 if (hclge_is_rx_buf_ok(hdev, rx_all))
1594 return 0;
1595
1596 return -ENOMEM;
1597 }
1598
1599 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
1600 {
1601 struct hclge_rx_priv_buff *req;
1602 struct hclge_desc desc;
1603 int ret;
1604 int i;
1605
1606 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1607 req = (struct hclge_rx_priv_buff *)desc.data;
1608
1609 /* Alloc private buffer TCs */
1610 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1611 struct hclge_priv_buf *priv = &hdev->priv_buf[i];
1612
1613 req->buf_num[i] =
1614 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1615 req->buf_num[i] |=
1616 cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
1617 }
1618
1619 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1620 if (ret) {
1621 dev_err(&hdev->pdev->dev,
1622 "rx private buffer alloc cmd failed %d\n", ret);
1623 return ret;
1624 }
1625
1626 return 0;
1627 }
1628
1629 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1630
1631 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
1632 {
1633 struct hclge_rx_priv_wl_buf *req;
1634 struct hclge_priv_buf *priv;
1635 struct hclge_desc desc[2];
1636 int i, j;
1637 int ret;
1638
1639 for (i = 0; i < 2; i++) {
1640 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1641 false);
1642 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1643
1644 /* The first descriptor sets the NEXT bit to 1 */
1645 if (i == 0)
1646 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1647 else
1648 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1649
1650 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1651 priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j];
1652 req->tc_wl[j].high =
1653 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1654 req->tc_wl[j].high |=
1655 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1656 HCLGE_RX_PRIV_EN_B);
1657 req->tc_wl[j].low =
1658 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1659 req->tc_wl[j].low |=
1660 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1661 HCLGE_RX_PRIV_EN_B);
1662 }
1663 }
1664
1665 /* Send 2 descriptors at one time */
1666 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1667 if (ret) {
1668 dev_err(&hdev->pdev->dev,
1669 "rx private waterline config cmd failed %d\n",
1670 ret);
1671 return ret;
1672 }
1673 return 0;
1674 }
1675
1676 static int hclge_common_thrd_config(struct hclge_dev *hdev)
1677 {
1678 struct hclge_shared_buf *s_buf = &hdev->s_buf;
1679 struct hclge_rx_com_thrd *req;
1680 struct hclge_desc desc[2];
1681 struct hclge_tc_thrd *tc;
1682 int i, j;
1683 int ret;
1684
1685 for (i = 0; i < 2; i++) {
1686 hclge_cmd_setup_basic_desc(&desc[i],
1687 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1688 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1689
1690 /* The first descriptor sets the NEXT bit to 1 */
1691 if (i == 0)
1692 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1693 else
1694 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1695
1696 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1697 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1698
1699 req->com_thrd[j].high =
1700 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1701 req->com_thrd[j].high |=
1702 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1703 HCLGE_RX_PRIV_EN_B);
1704 req->com_thrd[j].low =
1705 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1706 req->com_thrd[j].low |=
1707 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1708 HCLGE_RX_PRIV_EN_B);
1709 }
1710 }
1711
1712 /* Send 2 descriptors at one time */
1713 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1714 if (ret) {
1715 dev_err(&hdev->pdev->dev,
1716 "common threshold config cmd failed %d\n", ret);
1717 return ret;
1718 }
1719 return 0;
1720 }
1721
1722 static int hclge_common_wl_config(struct hclge_dev *hdev)
1723 {
1724 struct hclge_shared_buf *buf = &hdev->s_buf;
1725 struct hclge_rx_com_wl *req;
1726 struct hclge_desc desc;
1727 int ret;
1728
1729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1730
1731 req = (struct hclge_rx_com_wl *)desc.data;
1732 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1733 req->com_wl.high |=
1734 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1735 HCLGE_RX_PRIV_EN_B);
1736
1737 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1738 req->com_wl.low |=
1739 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1740 HCLGE_RX_PRIV_EN_B);
1741
1742 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1743 if (ret) {
1744 dev_err(&hdev->pdev->dev,
1745 "common waterline config cmd failed %d\n", ret);
1746 return ret;
1747 }
1748
1749 return 0;
1750 }
1751
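/* Top-level packet-buffer setup: allocate the TX buffer, size the RX
 * private buffers, then program the private waterlines, the common
 * per-TC thresholds and the common waterline, in that order.
 */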
1752 int hclge_buffer_alloc(struct hclge_dev *hdev)
1753 {
1754 u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1755 int ret;
1756
1757 hdev->priv_buf = devm_kcalloc(&hdev->pdev->dev, HCLGE_MAX_TC_NUM,
1758 sizeof(struct hclge_priv_buf),
1759 GFP_KERNEL);
1760 if (!hdev->priv_buf)
1761 return -ENOMEM;
1762
1763 ret = hclge_tx_buffer_alloc(hdev, tx_buf_size);
1764 if (ret) {
1765 dev_err(&hdev->pdev->dev,
1766 "could not alloc tx buffers %d\n", ret);
1767 return ret;
1768 }
1769
1770 ret = hclge_rx_buffer_calc(hdev, tx_buf_size);
1771 if (ret) {
1772 dev_err(&hdev->pdev->dev,
1773 "could not calc rx priv buffer size for all TCs %d\n",
1774 ret);
1775 return ret;
1776 }
1777
1778 ret = hclge_rx_priv_buf_alloc(hdev);
1779 if (ret) {
1780 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1781 ret);
1782 return ret;
1783 }
1784
1785 ret = hclge_rx_priv_wl_config(hdev);
1786 if (ret) {
1787 dev_err(&hdev->pdev->dev,
1788 "could not configure rx private waterline %d\n", ret);
1789 return ret;
1790 }
1791
1792 ret = hclge_common_thrd_config(hdev);
1793 if (ret) {
1794 dev_err(&hdev->pdev->dev,
1795 "could not configure common threshold %d\n", ret);
1796 return ret;
1797 }
1798
1799 ret = hclge_common_wl_config(hdev);
1800 if (ret) {
1801 dev_err(&hdev->pdev->dev,
1802 "could not configure common waterline %d\n", ret);
1803 return ret;
1804 }
1805
1806 return 0;
1807 }
1808
1809 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1810 {
1811 struct hnae3_handle *roce = &vport->roce;
1812 struct hnae3_handle *nic = &vport->nic;
1813
1814 roce->rinfo.num_vectors = vport->back->num_roce_msix;
1815
1816 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1817 vport->back->num_msi_left == 0)
1818 return -EINVAL;
1819
1820 roce->rinfo.base_vector = vport->back->roce_base_vector;
1821
1822 roce->rinfo.netdev = nic->kinfo.netdev;
1823 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1824
1825 roce->pdev = nic->pdev;
1826 roce->ae_algo = nic->ae_algo;
1827 roce->numa_node_mask = nic->numa_node_mask;
1828
1829 return 0;
1830 }
1831
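/* MSI-X initialization. Note that pci_enable_msix_range() is called
 * with min == max == num_msi, so allocation fails unless the full
 * vector count is available.
 */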
1832 static int hclge_init_msix(struct hclge_dev *hdev)
1833 {
1834 struct pci_dev *pdev = hdev->pdev;
1835 int ret, i;
1836
1837 hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
1838 sizeof(struct msix_entry),
1839 GFP_KERNEL);
1840 if (!hdev->msix_entries)
1841 return -ENOMEM;
1842
1843 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1844 sizeof(u16), GFP_KERNEL);
1845 if (!hdev->vector_status)
1846 return -ENOMEM;
1847
1848 for (i = 0; i < hdev->num_msi; i++) {
1849 hdev->msix_entries[i].entry = i;
1850 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1851 }
1852
1853 hdev->num_msi_left = hdev->num_msi;
1854 hdev->base_msi_vector = hdev->pdev->irq;
1855 hdev->roce_base_vector = hdev->base_msi_vector +
1856 HCLGE_ROCE_VECTOR_OFFSET;
1857
1858 ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
1859 hdev->num_msi, hdev->num_msi);
1860 if (ret < 0) {
1861 dev_err(&hdev->pdev->dev,
1862 "MSI-X vector alloc failed: %d\n", ret);
1863 return ret;
1864 }
1865
1866 return 0;
1867 }
1868
1869 static int hclge_init_msi(struct hclge_dev *hdev)
1870 {
1871 struct pci_dev *pdev = hdev->pdev;
1872 int vectors;
1873 int i;
1874
1875 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1876 sizeof(u16), GFP_KERNEL);
1877 if (!hdev->vector_status)
1878 return -ENOMEM;
1879
1880 for (i = 0; i < hdev->num_msi; i++)
1881 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1882
1883 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
1884 if (vectors < 0) {
1885 dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
1886 return -EINVAL;
1887 }
1888 hdev->num_msi = vectors;
1889 hdev->num_msi_left = vectors;
1890 hdev->base_msi_vector = pdev->irq;
1891 hdev->roce_base_vector = hdev->base_msi_vector +
1892 HCLGE_ROCE_VECTOR_OFFSET;
1893
1894 return 0;
1895 }
1896
1897 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
1898 {
1899 struct hclge_mac *mac = &hdev->hw.mac;
1900
1901 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
1902 mac->duplex = (u8)duplex;
1903 else
1904 mac->duplex = HCLGE_MAC_FULL;
1905
1906 mac->speed = speed;
1907 }
1908
1909 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1910 {
1911 struct hclge_config_mac_speed_dup *req;
1912 struct hclge_desc desc;
1913 int ret;
1914
1915 req = (struct hclge_config_mac_speed_dup *)desc.data;
1916
1917 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1918
1919 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1920
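/* Map the abstract speed to the firmware encoding used below:
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G,
 * 6 = 10M, 7 = 100M.
 */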
1921 switch (speed) {
1922 case HCLGE_MAC_SPEED_10M:
1923 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1924 HCLGE_CFG_SPEED_S, 6);
1925 break;
1926 case HCLGE_MAC_SPEED_100M:
1927 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1928 HCLGE_CFG_SPEED_S, 7);
1929 break;
1930 case HCLGE_MAC_SPEED_1G:
1931 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1932 HCLGE_CFG_SPEED_S, 0);
1933 break;
1934 case HCLGE_MAC_SPEED_10G:
1935 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1936 HCLGE_CFG_SPEED_S, 1);
1937 break;
1938 case HCLGE_MAC_SPEED_25G:
1939 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1940 HCLGE_CFG_SPEED_S, 2);
1941 break;
1942 case HCLGE_MAC_SPEED_40G:
1943 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1944 HCLGE_CFG_SPEED_S, 3);
1945 break;
1946 case HCLGE_MAC_SPEED_50G:
1947 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1948 HCLGE_CFG_SPEED_S, 4);
1949 break;
1950 case HCLGE_MAC_SPEED_100G:
1951 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1952 HCLGE_CFG_SPEED_S, 5);
1953 break;
1954 default:
1955 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1956 return -EINVAL;
1957 }
1958
1959 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1960 1);
1961
1962 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1963 if (ret) {
1964 dev_err(&hdev->pdev->dev,
1965 "mac speed/duplex config cmd failed %d.\n", ret);
1966 return ret;
1967 }
1968
1969 hclge_check_speed_dup(hdev, duplex, speed);
1970
1971 return 0;
1972 }
1973
1974 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1975 u8 duplex)
1976 {
1977 struct hclge_vport *vport = hclge_get_vport(handle);
1978 struct hclge_dev *hdev = vport->back;
1979
1980 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1981 }
1982
1983 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
1984 u8 *duplex)
1985 {
1986 struct hclge_query_an_speed_dup *req;
1987 struct hclge_desc desc;
1988 int speed_tmp;
1989 int ret;
1990
1991 req = (struct hclge_query_an_speed_dup *)desc.data;
1992
1993 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
1994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1995 if (ret) {
1996 dev_err(&hdev->pdev->dev,
1997 "mac speed/autoneg/duplex query cmd failed %d\n",
1998 ret);
1999 return ret;
2000 }
2001
2002 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2003 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2004 HCLGE_QUERY_SPEED_S);
2005
2006 ret = hclge_parse_speed(speed_tmp, speed);
2007 if (ret) {
2008 dev_err(&hdev->pdev->dev,
2009 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2010 return -EIO;
2011 }
2012
2013 return 0;
2014 }
2015
2016 static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2017 {
2018 struct hclge_mac *mac = &hdev->hw.mac;
2019 struct hclge_query_an_speed_dup *req;
2020 struct hclge_desc desc;
2021 int ret;
2022
2023 req = (struct hclge_query_an_speed_dup *)desc.data;
2024
2025 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2026 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2027 if (ret) {
2028 dev_err(&hdev->pdev->dev,
2029 "autoneg result query cmd failed %d.\n", ret);
2030 return ret;
2031 }
2032
2033 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2034
2035 return 0;
2036 }
2037
2038 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2039 {
2040 struct hclge_config_auto_neg *req;
2041 struct hclge_desc desc;
2042 int ret;
2043
2044 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2045
2046 req = (struct hclge_config_auto_neg *)desc.data;
2047 hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2048
2049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2050 if (ret) {
2051 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2052 ret);
2053 return ret;
2054 }
2055
2056 return 0;
2057 }
2058
2059 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2060 {
2061 struct hclge_vport *vport = hclge_get_vport(handle);
2062 struct hclge_dev *hdev = vport->back;
2063
2064 return hclge_set_autoneg_en(hdev, enable);
2065 }
2066
2067 static int hclge_get_autoneg(struct hnae3_handle *handle)
2068 {
2069 struct hclge_vport *vport = hclge_get_vport(handle);
2070 struct hclge_dev *hdev = vport->back;
2071
2072 hclge_query_autoneg_result(hdev);
2073
2074 return hdev->hw.mac.autoneg;
2075 }
2076
2077 static int hclge_mac_init(struct hclge_dev *hdev)
2078 {
2079 struct hclge_mac *mac = &hdev->hw.mac;
2080 int ret;
2081
2082 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2083 if (ret) {
2084 dev_err(&hdev->pdev->dev,
2085 "Config mac speed dup fail ret=%d\n", ret);
2086 return ret;
2087 }
2088
2089 mac->link = 0;
2090
2091 ret = hclge_mac_mdio_config(hdev);
2092 if (ret) {
2093 dev_warn(&hdev->pdev->dev,
2094 "mdio config fail ret=%d\n", ret);
2095 return ret;
2096 }
2097
2098 /* Initialize the MTA table work mode */
2099 hdev->accept_mta_mc = true;
2100 hdev->enable_mta = true;
2101 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2102
2103 ret = hclge_set_mta_filter_mode(hdev,
2104 hdev->mta_mac_sel_type,
2105 hdev->enable_mta);
2106 if (ret) {
2107 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2108 ret);
2109 return ret;
2110 }
2111
2112 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2113 }
2114
2115 static void hclge_task_schedule(struct hclge_dev *hdev)
2116 {
2117 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2118 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2119 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2120 (void)schedule_work(&hdev->service_task);
2121 }
2122
2123 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2124 {
2125 struct hclge_link_status *req;
2126 struct hclge_desc desc;
2127 int link_status;
2128 int ret;
2129
2130 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2131 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2132 if (ret) {
2133 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2134 ret);
2135 return ret;
2136 }
2137
2138 req = (struct hclge_link_status *)desc.data;
2139 link_status = req->status & HCLGE_LINK_STATUS;
2140
2141 return !!link_status;
2142 }
2143
2144 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2145 {
2146 int mac_state;
2147 int link_stat;
2148
2149 mac_state = hclge_get_mac_link_status(hdev);
2150
2151 if (hdev->hw.mac.phydev) {
2152 if (!genphy_read_status(hdev->hw.mac.phydev))
2153 link_stat = mac_state &
2154 hdev->hw.mac.phydev->link;
2155 else
2156 link_stat = 0;
2157
2158 } else {
2159 link_stat = mac_state;
2160 }
2161
2162 return !!link_stat;
2163 }
2164
2165 static void hclge_update_link_status(struct hclge_dev *hdev)
2166 {
2167 struct hnae3_client *client = hdev->nic_client;
2168 struct hnae3_handle *handle;
2169 int state;
2170 int i;
2171
2172 if (!client)
2173 return;
2174 state = hclge_get_mac_phy_link(hdev);
2175 if (state != hdev->hw.mac.link) {
2176 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2177 handle = &hdev->vport[i].nic;
2178 client->ops->link_status_change(handle, state);
2179 }
2180 hdev->hw.mac.link = state;
2181 }
2182 }
2183
2184 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2185 {
2186 struct hclge_mac mac = hdev->hw.mac;
2187 u8 duplex;
2188 int speed;
2189 int ret;
2190
2191 /* get the speed and duplex as the autoneg result from the mac cmd
2192 * when the phy doesn't exist.
2193 */
2194 if (mac.phydev)
2195 return 0;
2196
2197 /* update mac->autoneg. */
2198 ret = hclge_query_autoneg_result(hdev);
2199 if (ret) {
2200 dev_err(&hdev->pdev->dev,
2201 "autoneg result query failed %d\n", ret);
2202 return ret;
2203 }
2204
2205 if (!mac.autoneg)
2206 return 0;
2207
2208 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2209 if (ret) {
2210 dev_err(&hdev->pdev->dev,
2211 "mac autoneg/speed/duplex query failed %d\n", ret);
2212 return ret;
2213 }
2214
2215 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2216 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2217 if (ret) {
2218 dev_err(&hdev->pdev->dev,
2219 "mac speed/duplex config failed %d\n", ret);
2220 return ret;
2221 }
2222 }
2223
2224 return 0;
2225 }
2226
2227 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2228 {
2229 struct hclge_vport *vport = hclge_get_vport(handle);
2230 struct hclge_dev *hdev = vport->back;
2231
2232 return hclge_update_speed_duplex(hdev);
2233 }
2234
2235 static int hclge_get_status(struct hnae3_handle *handle)
2236 {
2237 struct hclge_vport *vport = hclge_get_vport(handle);
2238 struct hclge_dev *hdev = vport->back;
2239
2240 hclge_update_link_status(hdev);
2241
2242 return hdev->hw.mac.link;
2243 }
2244
2245 static void hclge_service_timer(unsigned long data)
2246 {
2247 struct hclge_dev *hdev = (struct hclge_dev *)data;
2248
2249 (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2250 hclge_task_schedule(hdev);
2251 }
2252
2253 static void hclge_service_complete(struct hclge_dev *hdev)
2254 {
2255 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2256
2257 /* Flush memory before next watchdog */
2258 smp_mb__before_atomic();
2259 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2260 }
2261
2262 static void hclge_service_task(struct work_struct *work)
2263 {
2264 struct hclge_dev *hdev =
2265 container_of(work, struct hclge_dev, service_task);
2266
2267 hclge_update_speed_duplex(hdev);
2268 hclge_update_link_status(hdev);
2269 hclge_update_stats_for_all(hdev);
2270 hclge_service_complete(hdev);
2271 }
2272
2273 static void hclge_disable_sriov(struct hclge_dev *hdev)
2274 {
2275 /* If our VFs are assigned we cannot shut down SR-IOV
2276 * without causing issues, so just leave the hardware
2277 * available but disabled
2278 */
2279 if (pci_vfs_assigned(hdev->pdev)) {
2280 dev_warn(&hdev->pdev->dev,
2281 "disabling driver while VFs are assigned\n");
2282 return;
2283 }
2284
2285 pci_disable_sriov(hdev->pdev);
2286 }
2287
2288 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2289 {
2290 /* VF handle has no client */
2291 if (!handle->client)
2292 return container_of(handle, struct hclge_vport, nic);
2293 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2294 return container_of(handle, struct hclge_vport, roce);
2295 else
2296 return container_of(handle, struct hclge_vport, nic);
2297 }
2298
2299 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2300 struct hnae3_vector_info *vector_info)
2301 {
2302 struct hclge_vport *vport = hclge_get_vport(handle);
2303 struct hnae3_vector_info *vector = vector_info;
2304 struct hclge_dev *hdev = vport->back;
2305 int alloc = 0;
2306 int i, j;
2307
2308 vector_num = min(hdev->num_msi_left, vector_num);
2309
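/* Scan vector_status for unused entries; index 0 is skipped
 * (presumably reserved for the driver's own misc vector).  Each
 * allocated entry records the owning vport and hands back the IRQ
 * number plus the per-vport doorbell address for that vector.
 */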
2310 for (j = 0; j < vector_num; j++) {
2311 for (i = 1; i < hdev->num_msi; i++) {
2312 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2313 vector->vector = pci_irq_vector(hdev->pdev, i);
2314 vector->io_addr = hdev->hw.io_base +
2315 HCLGE_VECTOR_REG_BASE +
2316 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2317 vport->vport_id *
2318 HCLGE_VECTOR_VF_OFFSET;
2319 hdev->vector_status[i] = vport->vport_id;
2320
2321 vector++;
2322 alloc++;
2323
2324 break;
2325 }
2326 }
2327 }
2328 hdev->num_msi_left -= alloc;
2329 hdev->num_msi_used += alloc;
2330
2331 return alloc;
2332 }
2333
2334 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2335 {
2336 int i;
2337
2338 for (i = 0; i < hdev->num_msi; i++) {
2339 if (hdev->msix_entries) {
2340 if (vector == hdev->msix_entries[i].vector)
2341 return i;
2342 } else {
2343 if (vector == (hdev->base_msi_vector + i))
2344 return i;
2345 }
2346 }
2347 return -EINVAL;
2348 }
2349
2350 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2351 {
2352 return HCLGE_RSS_KEY_SIZE;
2353 }
2354
2355 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2356 {
2357 return HCLGE_RSS_IND_TBL_SIZE;
2358 }
2359
2360 static int hclge_get_rss_algo(struct hclge_dev *hdev)
2361 {
2362 struct hclge_rss_config *req;
2363 struct hclge_desc desc;
2364 int rss_hash_algo;
2365 int ret;
2366
2367 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2368
2369 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2370 if (ret) {
2371 dev_err(&hdev->pdev->dev,
2372 "Get link status error, status =%d\n", ret);
2373 return ret;
2374 }
2375
2376 req = (struct hclge_rss_config *)desc.data;
2377 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2378
2379 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2380 return ETH_RSS_HASH_TOP;
2381
2382 return -EINVAL;
2383 }
2384
2385 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2386 const u8 hfunc, const u8 *key)
2387 {
2388 struct hclge_rss_config *req;
2389 struct hclge_desc desc;
2390 int key_offset;
2391 int key_size;
2392 int ret;
2393
2394 req = (struct hclge_rss_config *)desc.data;
2395
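/* The hash key is programmed in three chunks of HCLGE_RSS_HASH_KEY_NUM
 * bytes; the final chunk carries whatever remains of the
 * HCLGE_RSS_KEY_SIZE-byte key.
 */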
2396 for (key_offset = 0; key_offset < 3; key_offset++) {
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2398 false);
2399
2400 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2401 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2402
2403 if (key_offset == 2)
2404 key_size =
2405 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2406 else
2407 key_size = HCLGE_RSS_HASH_KEY_NUM;
2408
2409 memcpy(req->hash_key,
2410 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2411
2412 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2413 if (ret) {
2414 dev_err(&hdev->pdev->dev,
2415 "Configure RSS config fail, status = %d\n",
2416 ret);
2417 return ret;
2418 }
2419 }
2420 return 0;
2421 }
2422
2423 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2424 {
2425 struct hclge_rss_indirection_table *req;
2426 struct hclge_desc desc;
2427 int i, j;
2428 int ret;
2429
2430 req = (struct hclge_rss_indirection_table *)desc.data;
2431
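/* The indirection table is written HCLGE_RSS_CFG_TBL_SIZE entries per
 * command, with start_table_index locating each chunk within the
 * HCLGE_RSS_IND_TBL_SIZE-entry table.
 */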
2432 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2433 hclge_cmd_setup_basic_desc
2434 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2435
2436 req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE;
2437 req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK;
2438
2439 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2440 req->rss_result[j] =
2441 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2442
2443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2444 if (ret) {
2445 dev_err(&hdev->pdev->dev,
2446 "Configure rss indir table fail,status = %d\n",
2447 ret);
2448 return ret;
2449 }
2450 }
2451 return 0;
2452 }
2453
2454 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2455 u16 *tc_size, u16 *tc_offset)
2456 {
2457 struct hclge_rss_tc_mode *req;
2458 struct hclge_desc desc;
2459 int ret;
2460 int i;
2461
2462 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2463 req = (struct hclge_rss_tc_mode *)desc.data;
2464
2465 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2466 hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B,
2467 (tc_valid[i] & 0x1));
2468 hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M,
2469 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2470 hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M,
2471 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2472 }
2473
2474 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2475 if (ret) {
2476 dev_err(&hdev->pdev->dev,
2477 "Configure rss tc mode fail, status = %d\n", ret);
2478 return ret;
2479 }
2480
2481 return 0;
2482 }
2483
2484 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2485 {
2486 #define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf
2487 #define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f
2488 struct hclge_rss_input_tuple *req;
2489 struct hclge_desc desc;
2490 int ret;
2491
2492 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2493
2494 req = (struct hclge_rss_input_tuple *)desc.data;
2495 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2496 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2497 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2498 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2499 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2500 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2501 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2502 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2503 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2504 if (ret) {
2505 dev_err(&hdev->pdev->dev,
2506 "Configure rss input fail, status = %d\n", ret);
2507 return ret;
2508 }
2509
2510 return 0;
2511 }
2512
2513 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2514 u8 *key, u8 *hfunc)
2515 {
2516 struct hclge_vport *vport = hclge_get_vport(handle);
2517 struct hclge_dev *hdev = vport->back;
2518 int i;
2519
2520 /* Get hash algorithm */
2521 if (hfunc)
2522 *hfunc = hclge_get_rss_algo(hdev);
2523
2524 /* Get the RSS Key required by the user */
2525 if (key)
2526 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2527
2528 /* Get indirect table */
2529 if (indir)
2530 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2531 indir[i] = vport->rss_indirection_tbl[i];
2532
2533 return 0;
2534 }
2535
2536 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2537 const u8 *key, const u8 hfunc)
2538 {
2539 struct hclge_vport *vport = hclge_get_vport(handle);
2540 struct hclge_dev *hdev = vport->back;
2541 u8 hash_algo;
2542 int ret, i;
2543
2544 /* Set the RSS Hash Key if specified by the user */
2545 if (key) {
2546 /* Update the shadow RSS key with the user specified key */
2547 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2548
2549 if (hfunc == ETH_RSS_HASH_TOP ||
2550 hfunc == ETH_RSS_HASH_NO_CHANGE)
2551 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2552 else
2553 return -EINVAL;
2554 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2555 if (ret)
2556 return ret;
2557 }
2558
2559 /* Update the shadow RSS table with user specified qids */
2560 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2561 vport->rss_indirection_tbl[i] = indir[i];
2562
2563 /* Update the hardware */
2564 ret = hclge_set_rss_indir_table(hdev, indir);
2565 return ret;
2566 }
2567
2568 static int hclge_get_tc_size(struct hnae3_handle *handle)
2569 {
2570 struct hclge_vport *vport = hclge_get_vport(handle);
2571 struct hclge_dev *hdev = vport->back;
2572
2573 return hdev->rss_size_max;
2574 }
2575
2576 static int hclge_rss_init_hw(struct hclge_dev *hdev)
2577 {
2578 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2579 struct hclge_vport *vport = hdev->vport;
2580 u16 tc_offset[HCLGE_MAX_TC_NUM];
2581 u8 rss_key[HCLGE_RSS_KEY_SIZE];
2582 u16 tc_valid[HCLGE_MAX_TC_NUM];
2583 u16 tc_size[HCLGE_MAX_TC_NUM];
2584 u32 *rss_indir = NULL;
2585 const u8 *key;
2586 int i, ret, j;
2587
2588 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
2589 if (!rss_indir)
2590 return -ENOMEM;
2591
2592 /* Get default RSS key */
2593 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
2594
2595 /* Initialize RSS indirect table for each vport */
2596 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
2597 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
2598 vport[j].rss_indirection_tbl[i] =
2599 i % hdev->rss_size_max;
2600 rss_indir[i] = vport[j].rss_indirection_tbl[i];
2601 }
2602 }
2603 ret = hclge_set_rss_indir_table(hdev, rss_indir);
2604 if (ret)
2605 goto err;
2606
2607 key = rss_key;
2608 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
2609 if (ret)
2610 goto err;
2611
2612 ret = hclge_set_rss_input_tuple(hdev);
2613 if (ret)
2614 goto err;
2615
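/* tc_size selects one of the HCLGE_RSS_TC_SIZE_* encodings for
 * rss_size_max; judging by the 0..7 values below, this looks like a
 * log2 encoding of the per-TC queue count.
 */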
2616 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2617 if (hdev->hw_tc_map & BIT(i))
2618 tc_valid[i] = 1;
2619 else
2620 tc_valid[i] = 0;
2621
2622 switch (hdev->rss_size_max) {
2623 case HCLGE_RSS_TC_SIZE_0:
2624 tc_size[i] = 0;
2625 break;
2626 case HCLGE_RSS_TC_SIZE_1:
2627 tc_size[i] = 1;
2628 break;
2629 case HCLGE_RSS_TC_SIZE_2:
2630 tc_size[i] = 2;
2631 break;
2632 case HCLGE_RSS_TC_SIZE_3:
2633 tc_size[i] = 3;
2634 break;
2635 case HCLGE_RSS_TC_SIZE_4:
2636 tc_size[i] = 4;
2637 break;
2638 case HCLGE_RSS_TC_SIZE_5:
2639 tc_size[i] = 5;
2640 break;
2641 case HCLGE_RSS_TC_SIZE_6:
2642 tc_size[i] = 6;
2643 break;
2644 case HCLGE_RSS_TC_SIZE_7:
2645 tc_size[i] = 7;
2646 break;
2647 default:
2648 break;
2649 }
2650 tc_offset[i] = hdev->rss_size_max * i;
2651 }
2652 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
2653
2654 err:
2655 kfree(rss_indir);
2656
2657 return ret;
2658 }
2659
2660 int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
2661 struct hnae3_ring_chain_node *ring_chain)
2662 {
2663 struct hclge_dev *hdev = vport->back;
2664 struct hclge_ctrl_vector_chain *req;
2665 struct hnae3_ring_chain_node *node;
2666 struct hclge_desc desc;
2667 int ret;
2668 int i;
2669
2670 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
2671
2672 req = (struct hclge_ctrl_vector_chain *)desc.data;
2673 req->int_vector_id = vector_id;
2674
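/* Walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring-to-vector mappings per descriptor; send and re-arm the
 * descriptor whenever it fills, then flush any partial batch at the
 * end.
 */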
2675 i = 0;
2676 for (node = ring_chain; node; node = node->next) {
2677 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2678 HCLGE_INT_TYPE_S,
2679 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2680 hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2681 HCLGE_TQP_ID_S, node->tqp_index);
2682 req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
2683
2684 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2685 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2686
2687 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2688 if (ret) {
2689 dev_err(&hdev->pdev->dev,
2690 "Map TQP fail, status is %d.\n",
2691 ret);
2692 return ret;
2693 }
2694 i = 0;
2695
2696 hclge_cmd_setup_basic_desc(&desc,
2697 HCLGE_OPC_ADD_RING_TO_VECTOR,
2698 false);
2699 req->int_vector_id = vector_id;
2700 }
2701 }
2702
2703 if (i > 0) {
2704 req->int_cause_num = i;
2705
2706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2707 if (ret) {
2708 dev_err(&hdev->pdev->dev,
2709 "Map TQP fail, status is %d.\n", ret);
2710 return ret;
2711 }
2712 }
2713
2714 return 0;
2715 }
2716
2717 int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle,
2718 int vector,
2719 struct hnae3_ring_chain_node *ring_chain)
2720 {
2721 struct hclge_vport *vport = hclge_get_vport(handle);
2722 struct hclge_dev *hdev = vport->back;
2723 int vector_id;
2724
2725 vector_id = hclge_get_vector_index(hdev, vector);
2726 if (vector_id < 0) {
2727 dev_err(&hdev->pdev->dev,
2728 "Get vector index fail. ret =%d\n", vector_id);
2729 return vector_id;
2730 }
2731
2732 return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
2733 }
2734
2735 static int hclge_unmap_ring_from_vector(
2736 struct hnae3_handle *handle, int vector,
2737 struct hnae3_ring_chain_node *ring_chain)
2738 {
2739 struct hclge_vport *vport = hclge_get_vport(handle);
2740 struct hclge_dev *hdev = vport->back;
2741 struct hclge_ctrl_vector_chain *req;
2742 struct hnae3_ring_chain_node *node;
2743 struct hclge_desc desc;
2744 int i, vector_id;
2745 int ret;
2746
2747 vector_id = hclge_get_vector_index(hdev, vector);
2748 if (vector_id < 0) {
2749 dev_err(&handle->pdev->dev,
2750 "Get vector index fail. ret =%d\n", vector_id);
2751 return vector_id;
2752 }
2753
2754 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
2755
2756 req = (struct hclge_ctrl_vector_chain *)desc.data;
2757 req->int_vector_id = vector_id;
2758
2759 i = 0;
2760 for (node = ring_chain; node; node = node->next) {
2761 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2762 HCLGE_INT_TYPE_S,
2763 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2764 hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2765 HCLGE_TQP_ID_S, node->tqp_index);
2766
2767 req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
2768
2769 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2770 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2771
2772 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2773 if (ret) {
2774 dev_err(&hdev->pdev->dev,
2775 "Unmap TQP fail, status is %d.\n",
2776 ret);
2777 return ret;
2778 }
2779 i = 0;
2780 hclge_cmd_setup_basic_desc(&desc,
2781 HCLGE_OPC_DEL_RING_TO_VECTOR,
2782 false);
2783 req->int_vector_id = vector_id;
2784 }
2785 }
2786
2787 if (i > 0) {
2788 req->int_cause_num = i;
2789
2790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2791 if (ret) {
2792 dev_err(&hdev->pdev->dev,
2793 "Unmap TQP fail, status is %d.\n", ret);
2794 return ret;
2795 }
2796 }
2797
2798 return 0;
2799 }
2800
2801 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
2802 struct hclge_promisc_param *param)
2803 {
2804 struct hclge_promisc_cfg *req;
2805 struct hclge_desc desc;
2806 int ret;
2807
2808 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
2809
2810 req = (struct hclge_promisc_cfg *)desc.data;
2811 req->vf_id = param->vf_id;
2812 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
2813
2814 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2815 if (ret) {
2816 dev_err(&hdev->pdev->dev,
2817 "Set promisc mode fail, status is %d.\n", ret);
2818 return ret;
2819 }
2820 return 0;
2821 }
2822
2823 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
2824 bool en_mc, bool en_bc, int vport_id)
2825 {
2826 if (!param)
2827 return;
2828
2829 memset(param, 0, sizeof(struct hclge_promisc_param));
2830 if (en_uc)
2831 param->enable = HCLGE_PROMISC_EN_UC;
2832 if (en_mc)
2833 param->enable |= HCLGE_PROMISC_EN_MC;
2834 if (en_bc)
2835 param->enable |= HCLGE_PROMISC_EN_BC;
2836 param->vf_id = vport_id;
2837 }
2838
2839 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
2840 {
2841 struct hclge_vport *vport = hclge_get_vport(handle);
2842 struct hclge_dev *hdev = vport->back;
2843 struct hclge_promisc_param param;
2844
2845 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
2846 hclge_cmd_set_promisc_mode(hdev, &param);
2847 }
2848
2849 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
2850 {
2851 struct hclge_desc desc;
2852 struct hclge_config_mac_mode *req =
2853 (struct hclge_config_mac_mode *)desc.data;
2854 int ret;
2855
2856 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
2857 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable);
2858 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable);
2859 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable);
2860 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable);
2861 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0);
2862 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0);
2863 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0);
2864 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0);
2865 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable);
2866 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable);
2867 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2868 HCLGE_MAC_RX_FCS_STRIP_B, enable);
2869 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2870 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
2871 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2872 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
2873 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2874 HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
2875
2876 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2877 if (ret)
2878 dev_err(&hdev->pdev->dev,
2879 "mac enable fail, ret =%d.\n", ret);
2880 }
2881
2882 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
2883 int stream_id, bool enable)
2884 {
2885 struct hclge_desc desc;
2886 struct hclge_cfg_com_tqp_queue *req =
2887 (struct hclge_cfg_com_tqp_queue *)desc.data;
2888 int ret;
2889
2890 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
2891 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
2892 req->stream_id = cpu_to_le16(stream_id);
2893 req->enable |= enable << HCLGE_TQP_ENABLE_B;
2894
2895 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2896 if (ret)
2897 dev_err(&hdev->pdev->dev,
2898 "Tqp enable fail, status =%d.\n", ret);
2899 return ret;
2900 }
2901
2902 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
2903 {
2904 struct hclge_vport *vport = hclge_get_vport(handle);
2905 struct hnae3_queue *queue;
2906 struct hclge_tqp *tqp;
2907 int i;
2908
2909 for (i = 0; i < vport->alloc_tqps; i++) {
2910 queue = handle->kinfo.tqp[i];
2911 tqp = container_of(queue, struct hclge_tqp, q);
2912 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
2913 }
2914 }
2915
2916 static int hclge_ae_start(struct hnae3_handle *handle)
2917 {
2918 struct hclge_vport *vport = hclge_get_vport(handle);
2919 struct hclge_dev *hdev = vport->back;
2920 int i, queue_id, ret;
2921
2922 for (i = 0; i < vport->alloc_tqps; i++) {
2923 /* todo clear interrupt */
2924 /* ring enable */
2925 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2926 if (queue_id < 0) {
2927 dev_warn(&hdev->pdev->dev,
2928 "Get invalid queue id, ignore it\n");
2929 continue;
2930 }
2931
2932 hclge_tqp_enable(hdev, queue_id, 0, true);
2933 }
2934 /* mac enable */
2935 hclge_cfg_mac_mode(hdev, true);
2936 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
2937 (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2938
2939 ret = hclge_mac_start_phy(hdev);
2940 if (ret)
2941 return ret;
2942
2943 /* reset tqp stats */
2944 hclge_reset_tqp_stats(handle);
2945
2946 return 0;
2947 }
2948
2949 static void hclge_ae_stop(struct hnae3_handle *handle)
2950 {
2951 struct hclge_vport *vport = hclge_get_vport(handle);
2952 struct hclge_dev *hdev = vport->back;
2953 int i, queue_id;
2954
2955 for (i = 0; i < vport->alloc_tqps; i++) {
2956 /* Ring disable */
2957 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2958 if (queue_id < 0) {
2959 dev_warn(&hdev->pdev->dev,
2960 "Get invalid queue id, ignore it\n");
2961 continue;
2962 }
2963
2964 hclge_tqp_enable(hdev, queue_id, 0, false);
2965 }
2966 /* Mac disable */
2967 hclge_cfg_mac_mode(hdev, false);
2968
2969 hclge_mac_stop_phy(hdev);
2970
2971 /* reset tqp stats */
2972 hclge_reset_tqp_stats(handle);
2973 }
2974
2975 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
2976 u16 cmdq_resp, u8 resp_code,
2977 enum hclge_mac_vlan_tbl_opcode op)
2978 {
2979 struct hclge_dev *hdev = vport->back;
2980 int return_status = -EIO;
2981
2982 if (cmdq_resp) {
2983 dev_err(&hdev->pdev->dev,
2984 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
2985 cmdq_resp);
2986 return -EIO;
2987 }
2988
2989 if (op == HCLGE_MAC_VLAN_ADD) {
2990 if ((!resp_code) || (resp_code == 1)) {
2991 return_status = 0;
2992 } else if (resp_code == 2) {
2993 return_status = -EIO;
2994 dev_err(&hdev->pdev->dev,
2995 "add mac addr failed for uc_overflow.\n");
2996 } else if (resp_code == 3) {
2997 return_status = -EIO;
2998 dev_err(&hdev->pdev->dev,
2999 "add mac addr failed for mc_overflow.\n");
3000 } else {
3001 dev_err(&hdev->pdev->dev,
3002 "add mac addr failed for undefined, code=%d.\n",
3003 resp_code);
3004 }
3005 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3006 if (!resp_code) {
3007 return_status = 0;
3008 } else if (resp_code == 1) {
3009 return_status = -EIO;
3010 dev_dbg(&hdev->pdev->dev,
3011 "remove mac addr failed for miss.\n");
3012 } else {
3013 dev_err(&hdev->pdev->dev,
3014 "remove mac addr failed for undefined, code=%d.\n",
3015 resp_code);
3016 }
3017 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3018 if (!resp_code) {
3019 return_status = 0;
3020 } else if (resp_code == 1) {
3021 return_status = -EIO;
3022 dev_dbg(&hdev->pdev->dev,
3023 "lookup mac addr failed for miss.\n");
3024 } else {
3025 dev_err(&hdev->pdev->dev,
3026 "lookup mac addr failed for undefined, code=%d.\n",
3027 resp_code);
3028 }
3029 } else {
3030 return_status = -EIO;
3031 dev_err(&hdev->pdev->dev,
3032 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3033 op);
3034 }
3035
3036 return return_status;
3037 }
3038
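/* A MAC/VLAN table entry carries a 256-bit function bitmap split
 * across two descriptors: desc[1] covers vfids 0-191 in six 32-bit
 * words, desc[2] covers vfids 192-255.  Set or clear the bit for the
 * given vfid.
 */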
3039 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3040 {
3041 int word_num;
3042 int bit_num;
3043
3044 if (vfid > 255 || vfid < 0)
3045 return -EIO;
3046
3047 if (vfid >= 0 && vfid <= 191) {
3048 word_num = vfid / 32;
3049 bit_num = vfid % 32;
3050 if (clr)
3051 desc[1].data[word_num] &= ~(1 << bit_num);
3052 else
3053 desc[1].data[word_num] |= (1 << bit_num);
3054 } else {
3055 word_num = (vfid - 192) / 32;
3056 bit_num = vfid % 32;
3057 if (clr)
3058 desc[2].data[word_num] &= ~(1 << bit_num);
3059 else
3060 desc[2].data[word_num] |= (1 << bit_num);
3061 }
3062
3063 return 0;
3064 }
3065
3066 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3067 {
3068 #define HCLGE_DESC_NUMBER 3
3069 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3070 int i, j;
3071
3072 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3073 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3074 if (desc[i].data[j])
3075 return false;
3076
3077 return true;
3078 }
3079
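/* Pack a 6-byte MAC into the table entry layout: address bytes 0-3
 * form the low-to-high bytes of mac_addr_hi32 and bytes 4-5 form
 * mac_addr_lo16, both stored little-endian.
 */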
3080 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
3081 const u8 *addr)
3082 {
3083 const unsigned char *mac_addr = addr;
3084 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3085 (mac_addr[0]) | (mac_addr[1] << 8);
3086 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3087
3088 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3089 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3090 }
3091
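/* Derive the MTA table index from the destination MAC: the first two
 * address bytes are shifted right by (4 - mta_mac_sel_type) and masked
 * to 12 bits, so with HCLGE_MAC_ADDR_47_36 (the default set in
 * hclge_mac_init) the index is bits 47-36 of the address.
 */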
3092 u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3093 const u8 *addr)
3094 {
3095 u16 high_val = addr[1] | (addr[0] << 8);
3096 struct hclge_dev *hdev = vport->back;
3097 u32 rsh = 4 - hdev->mta_mac_sel_type;
3098 u16 ret_val = (high_val >> rsh) & 0xfff;
3099
3100 return ret_val;
3101 }
3102
3103 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3104 enum hclge_mta_dmac_sel_type mta_mac_sel,
3105 bool enable)
3106 {
3107 struct hclge_mta_filter_mode *req;
3108 struct hclge_desc desc;
3109 int ret;
3110
3111 req = (struct hclge_mta_filter_mode *)desc.data;
3112 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3113
3114 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3115 enable);
3116 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3117 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3118
3119 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3120 if (ret) {
3121 dev_err(&hdev->pdev->dev,
3122 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3123 ret);
3124 return ret;
3125 }
3126
3127 return 0;
3128 }
3129
3130 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3131 u8 func_id,
3132 bool enable)
3133 {
3134 struct hclge_cfg_func_mta_filter *req;
3135 struct hclge_desc desc;
3136 int ret;
3137
3138 req = (struct hclge_cfg_func_mta_filter *)desc.data;
3139 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3140
3141 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3142 enable);
3143 req->function_id = func_id;
3144
3145 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3146 if (ret) {
3147 dev_err(&hdev->pdev->dev,
3148 "Config func_id enable failed for cmd_send, ret =%d.\n",
3149 ret);
3150 return ret;
3151 }
3152
3153 return 0;
3154 }
3155
3156 static int hclge_set_mta_table_item(struct hclge_vport *vport,
3157 u16 idx,
3158 bool enable)
3159 {
3160 struct hclge_dev *hdev = vport->back;
3161 struct hclge_cfg_func_mta_item *req;
3162 struct hclge_desc desc;
3163 int ret;
3164
3165 req = (struct hclge_cfg_func_mta_item *)desc.data;
3166 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3167 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3168
3169 hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3170 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3171 req->item_idx = cpu_to_le16(req->item_idx);
3172
3173 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3174 if (ret) {
3175 dev_err(&hdev->pdev->dev,
3176 "Config mta table item failed for cmd_send, ret =%d.\n",
3177 ret);
3178 return ret;
3179 }
3180
3181 return 0;
3182 }
3183
3184 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3185 struct hclge_mac_vlan_tbl_entry *req)
3186 {
3187 struct hclge_dev *hdev = vport->back;
3188 struct hclge_desc desc;
3189 u8 resp_code;
3190 int ret;
3191
3192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3193
3194 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3195
3196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3197 if (ret) {
3198 dev_err(&hdev->pdev->dev,
3199 "del mac addr failed for cmd_send, ret =%d.\n",
3200 ret);
3201 return ret;
3202 }
3203 resp_code = (desc.data[0] >> 8) & 0xff;
3204
3205 return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code,
3206 HCLGE_MAC_VLAN_REMOVE);
3207 }
3208
3209 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3210 struct hclge_mac_vlan_tbl_entry *req,
3211 struct hclge_desc *desc,
3212 bool is_mc)
3213 {
3214 struct hclge_dev *hdev = vport->back;
3215 u8 resp_code;
3216 int ret;
3217
3218 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3219 if (is_mc) {
3220 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3221 memcpy(desc[0].data,
3222 req,
3223 sizeof(struct hclge_mac_vlan_tbl_entry));
3224 hclge_cmd_setup_basic_desc(&desc[1],
3225 HCLGE_OPC_MAC_VLAN_ADD,
3226 true);
3227 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3228 hclge_cmd_setup_basic_desc(&desc[2],
3229 HCLGE_OPC_MAC_VLAN_ADD,
3230 true);
3231 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3232 } else {
3233 memcpy(desc[0].data,
3234 req,
3235 sizeof(struct hclge_mac_vlan_tbl_entry));
3236 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3237 }
3238 if (ret) {
3239 dev_err(&hdev->pdev->dev,
3240 "lookup mac addr failed for cmd_send, ret =%d.\n",
3241 ret);
3242 return ret;
3243 }
3244 resp_code = (desc[0].data[0] >> 8) & 0xff;
3245
3246 return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code,
3247 HCLGE_MAC_VLAN_LKUP);
3248 }
3249
3250 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3251 struct hclge_mac_vlan_tbl_entry *req,
3252 struct hclge_desc *mc_desc)
3253 {
3254 struct hclge_dev *hdev = vport->back;
3255 int cfg_status;
3256 u8 resp_code;
3257 int ret;
3258
3259 if (!mc_desc) {
3260 struct hclge_desc desc;
3261
3262 hclge_cmd_setup_basic_desc(&desc,
3263 HCLGE_OPC_MAC_VLAN_ADD,
3264 false);
3265 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3267 resp_code = (desc.data[0] >> 8) & 0xff;
3268 cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval,
3269 resp_code,
3270 HCLGE_MAC_VLAN_ADD);
3271 } else {
3272 mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3273 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3274 mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3275 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3276 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3277 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3278 memcpy(mc_desc[0].data, req,
3279 sizeof(struct hclge_mac_vlan_tbl_entry));
3280 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3281 resp_code = (mc_desc[0].data[0] >> 8) & 0xff;
3282 cfg_status = hclge_get_mac_vlan_cmd_status(vport,
3283 mc_desc[0].retval,
3284 resp_code,
3285 HCLGE_MAC_VLAN_ADD);
3286 }
3287
3288 if (ret) {
3289 dev_err(&hdev->pdev->dev,
3290 "add mac addr failed for cmd_send, ret =%d.\n",
3291 ret);
3292 return ret;
3293 }
3294
3295 return cfg_status;
3296 }
3297
3298 static int hclge_add_uc_addr(struct hnae3_handle *handle,
3299 const unsigned char *addr)
3300 {
3301 struct hclge_vport *vport = hclge_get_vport(handle);
3302
3303 return hclge_add_uc_addr_common(vport, addr);
3304 }
3305
3306 int hclge_add_uc_addr_common(struct hclge_vport *vport,
3307 const unsigned char *addr)
3308 {
3309 struct hclge_dev *hdev = vport->back;
3310 struct hclge_mac_vlan_tbl_entry req;
3311 enum hclge_cmd_status status;
3312
3313 /* mac addr check */
3314 if (is_zero_ether_addr(addr) ||
3315 is_broadcast_ether_addr(addr) ||
3316 is_multicast_ether_addr(addr)) {
3317 dev_err(&hdev->pdev->dev,
3318 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3319 addr,
3320 is_zero_ether_addr(addr),
3321 is_broadcast_ether_addr(addr),
3322 is_multicast_ether_addr(addr));
3323 return -EINVAL;
3324 }
3325
3326 memset(&req, 0, sizeof(req));
3327 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3328 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3329 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3330 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3331 hnae_set_bit(req.egress_port,
3332 HCLGE_MAC_EPORT_SW_EN_B, 0);
3333 hnae_set_bit(req.egress_port,
3334 HCLGE_MAC_EPORT_TYPE_B, 0);
3335 hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M,
3336 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
3337 hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M,
3338 HCLGE_MAC_EPORT_PFID_S, 0);
3339 req.egress_port = cpu_to_le16(req.egress_port);
3340
3341 hclge_prepare_mac_addr(&req, addr);
3342
3343 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
3344
3345 return status;
3346 }
3347
3348 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3349 const unsigned char *addr)
3350 {
3351 struct hclge_vport *vport = hclge_get_vport(handle);
3352
3353 return hclge_rm_uc_addr_common(vport, addr);
3354 }
3355
3356 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3357 const unsigned char *addr)
3358 {
3359 struct hclge_dev *hdev = vport->back;
3360 struct hclge_mac_vlan_tbl_entry req;
3361 enum hclge_cmd_status status;
3362
3363 /* mac addr check */
3364 if (is_zero_ether_addr(addr) ||
3365 is_broadcast_ether_addr(addr) ||
3366 is_multicast_ether_addr(addr)) {
3367 dev_dbg(&hdev->pdev->dev,
3368 "Remove mac err! invalid mac:%pM.\n",
3369 addr);
3370 return -EINVAL;
3371 }
3372
3373 memset(&req, 0, sizeof(req));
3374 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3375 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3376 hclge_prepare_mac_addr(&req, addr);
3377 status = hclge_remove_mac_vlan_tbl(vport, &req);
3378
3379 return status;
3380 }
3381
3382 static int hclge_add_mc_addr(struct hnae3_handle *handle,
3383 const unsigned char *addr)
3384 {
3385 struct hclge_vport *vport = hclge_get_vport(handle);
3386
3387 return hclge_add_mc_addr_common(vport, addr);
3388 }
3389
3390 int hclge_add_mc_addr_common(struct hclge_vport *vport,
3391 const unsigned char *addr)
3392 {
3393 struct hclge_dev *hdev = vport->back;
3394 struct hclge_mac_vlan_tbl_entry req;
3395 struct hclge_desc desc[3];
3396 u16 tbl_idx;
3397 int status;
3398
3399 /* mac addr check */
3400 if (!is_multicast_ether_addr(addr)) {
3401 dev_err(&hdev->pdev->dev,
3402 "Add mc mac err! invalid mac:%pM.\n",
3403 addr);
3404 return -EINVAL;
3405 }
3406 memset(&req, 0, sizeof(req));
3407 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3408 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3409 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3410 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3411 hclge_prepare_mac_addr(&req, addr);
3412 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3413 if (!status) {
3414 /* This mac addr exists, update VFID for it */
3415 hclge_update_desc_vfid(desc, vport->vport_id, false);
3416 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3417 } else {
3418 /* This mac addr does not exist, add a new entry for it */
3419 memset(desc[0].data, 0, sizeof(desc[0].data));
3420 memset(desc[1].data, 0, sizeof(desc[1].data));
3421 memset(desc[2].data, 0, sizeof(desc[2].data));
3422 hclge_update_desc_vfid(desc, vport->vport_id, false);
3423 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3424 }
3425
3426 /* Set MTA table for this MAC address */
3427 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3428 status = hclge_set_mta_table_item(vport, tbl_idx, true);
3429
3430 return status;
3431 }
3432
3433 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
3434 const unsigned char *addr)
3435 {
3436 struct hclge_vport *vport = hclge_get_vport(handle);
3437
3438 return hclge_rm_mc_addr_common(vport, addr);
3439 }
3440
3441 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
3442 const unsigned char *addr)
3443 {
3444 struct hclge_dev *hdev = vport->back;
3445 struct hclge_mac_vlan_tbl_entry req;
3446 enum hclge_cmd_status status;
3447 struct hclge_desc desc[3];
3448 u16 tbl_idx;
3449
3450 /* mac addr check */
3451 if (!is_multicast_ether_addr(addr)) {
3452 dev_dbg(&hdev->pdev->dev,
3453 "Remove mc mac err! invalid mac:%pM.\n",
3454 addr);
3455 return -EINVAL;
3456 }
3457
3458 memset(&req, 0, sizeof(req));
3459 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3460 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3461 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3462 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3463 hclge_prepare_mac_addr(&req, addr);
3464 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3465 if (!status) {
3466 /* This mac addr exists, remove this handle's VFID for it */
3467 hclge_update_desc_vfid(desc, vport->vport_id, true);
3468
3469 if (hclge_is_all_function_id_zero(desc))
3470 /* All the vfids are zero, so delete this entry */
3471 status = hclge_remove_mac_vlan_tbl(vport, &req);
3472 else
3473 /* Not all the vfids are zero, so just update the vfids */
3474 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3475
3476 } else {
3477 /* This mac addr does not exist, so it can't be deleted */
3478 dev_err(&hdev->pdev->dev,
3479 "Rm multicast mac addr failed, ret = %d.\n",
3480 status);
3481 return -EIO;
3482 }
3483
3484 /* Clear the MTA table item for this MAC address */
3485 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3486 status = hclge_set_mta_table_item(vport, tbl_idx, false);
3487
3488 return status;
3489 }
3490
3491 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
3492 {
3493 struct hclge_vport *vport = hclge_get_vport(handle);
3494 struct hclge_dev *hdev = vport->back;
3495
3496 ether_addr_copy(p, hdev->hw.mac.mac_addr);
3497 }
3498
3499 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
3500 {
3501 const unsigned char *new_addr = (const unsigned char *)p;
3502 struct hclge_vport *vport = hclge_get_vport(handle);
3503 struct hclge_dev *hdev = vport->back;
3504
3505 /* mac addr check */
3506 if (is_zero_ether_addr(new_addr) ||
3507 is_broadcast_ether_addr(new_addr) ||
3508 is_multicast_ether_addr(new_addr)) {
3509 dev_err(&hdev->pdev->dev,
3510 "Change uc mac err! invalid mac:%p.\n",
3511 new_addr);
3512 return -EINVAL;
3513 }
3514
3515 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
3516
3517 if (!hclge_add_uc_addr(handle, new_addr)) {
3518 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
3519 return 0;
3520 }
3521
3522 return -EIO;
3523 }
3524
3525 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
3526 bool filter_en)
3527 {
3528 struct hclge_vlan_filter_ctrl *req;
3529 struct hclge_desc desc;
3530 int ret;
3531
3532 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
3533
3534 req = (struct hclge_vlan_filter_ctrl *)desc.data;
3535 req->vlan_type = vlan_type;
3536 req->vlan_fe = filter_en;
3537
3538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3539 if (ret) {
3540 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
3541 ret);
3542 return ret;
3543 }
3544
3545 return 0;
3546 }
3547
3548 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
3549 bool is_kill, u16 vlan, u8 qos, __be16 proto)
3550 {
3551 #define HCLGE_MAX_VF_BYTES 16
3552 struct hclge_vlan_filter_vf_cfg *req0;
3553 struct hclge_vlan_filter_vf_cfg *req1;
3554 struct hclge_desc desc[2];
3555 u8 vf_byte_val;
3556 u8 vf_byte_off;
3557 int ret;
3558
3559 hclge_cmd_setup_basic_desc(&desc[0],
3560 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3561 hclge_cmd_setup_basic_desc(&desc[1],
3562 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3563
3564 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3565
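/* Each VF owns one bit in a bitmap that spans the two descriptors:
 * the first HCLGE_MAX_VF_BYTES bytes live in desc[0] and the overflow
 * in desc[1].
 */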
3566 vf_byte_off = vfid / 8;
3567 vf_byte_val = 1 << (vfid % 8);
3568
3569 req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data;
3570 req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data;
3571
3572 req0->vlan_id = vlan;
3573 req0->vlan_cfg = is_kill;
3574
3575 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
3576 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
3577 else
3578 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
3579
3580 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3581 if (ret) {
3582 dev_err(&hdev->pdev->dev,
3583 "Send vf vlan command fail, ret =%d.\n",
3584 ret);
3585 return ret;
3586 }
3587
3588 if (!is_kill) {
3589 if (!req0->resp_code || req0->resp_code == 1)
3590 return 0;
3591
3592 dev_err(&hdev->pdev->dev,
3593 "Add vf vlan filter fail, ret =%d.\n",
3594 req0->resp_code);
3595 } else {
3596 if (!req0->resp_code)
3597 return 0;
3598
3599 dev_err(&hdev->pdev->dev,
3600 "Kill vf vlan filter fail, ret =%d.\n",
3601 req0->resp_code);
3602 }
3603
3604 return -EIO;
3605 }
3606
3607 static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
3608 __be16 proto, u16 vlan_id,
3609 bool is_kill)
3610 {
3611 struct hclge_vport *vport = hclge_get_vport(handle);
3612 struct hclge_dev *hdev = vport->back;
3613 struct hclge_vlan_filter_pf_cfg *req;
3614 struct hclge_desc desc;
3615 u8 vlan_offset_byte_val;
3616 u8 vlan_offset_byte;
3617 u8 vlan_offset_160;
3618 int ret;
3619
3620 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
3621
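/* The 4K VLAN ID space is programmed 160 IDs at a time: vlan_offset
 * selects the 160-ID window and the bitmap flags the one VLAN being
 * added or killed within it.
 */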
3622 vlan_offset_160 = vlan_id / 160;
3623 vlan_offset_byte = (vlan_id % 160) / 8;
3624 vlan_offset_byte_val = 1 << (vlan_id % 8);
3625
3626 req = (struct hclge_vlan_filter_pf_cfg *)desc.data;
3627 req->vlan_offset = vlan_offset_160;
3628 req->vlan_cfg = is_kill;
3629 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
3630
3631 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3632 if (ret) {
3633 dev_err(&hdev->pdev->dev,
3634 "port vlan command, send fail, ret =%d.\n",
3635 ret);
3636 return ret;
3637 }
3638
3639 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
3640 if (ret) {
3641 dev_err(&hdev->pdev->dev,
3642 "Set pf vlan filter config fail, ret =%d.\n",
3643 ret);
3644 return -EIO;
3645 }
3646
3647 return 0;
3648 }
3649
3650 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
3651 u16 vlan, u8 qos, __be16 proto)
3652 {
3653 struct hclge_vport *vport = hclge_get_vport(handle);
3654 struct hclge_dev *hdev = vport->back;
3655
3656 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
3657 return -EINVAL;
3658 if (proto != htons(ETH_P_8021Q))
3659 return -EPROTONOSUPPORT;
3660
3661 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
3662 }
3663
3664 static int hclge_init_vlan_config(struct hclge_dev *hdev)
3665 {
3666 #define HCLGE_VLAN_TYPE_VF_TABLE 0
3667 #define HCLGE_VLAN_TYPE_PORT_TABLE 1
3668 int ret;
3669
3670 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
3671 true);
3672 if (ret)
3673 return ret;
3674
3675 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
3676 true);
3677
3678 return ret;
3679 }
3680
3681 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
3682 {
3683 struct hclge_vport *vport = hclge_get_vport(handle);
3684 struct hclge_config_max_frm_size *req;
3685 struct hclge_dev *hdev = vport->back;
3686 struct hclge_desc desc;
3687 int ret;
3688
3689 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
3690 return -EINVAL;
3691
3692 hdev->mps = new_mtu;
3693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
3694
3695 req = (struct hclge_config_max_frm_size *)desc.data;
3696 req->max_frm_size = cpu_to_le16(new_mtu);
3697
3698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3699 if (ret) {
3700 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
3701 return ret;
3702 }
3703
3704 return 0;
3705 }
3706
3707 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
3708 bool enable)
3709 {
3710 struct hclge_reset_tqp_queue *req;
3711 struct hclge_desc desc;
3712 int ret;
3713
3714 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
3715
3716 req = (struct hclge_reset_tqp_queue *)desc.data;
3717 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3718 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
3719
3720 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3721 if (ret) {
3722 dev_err(&hdev->pdev->dev,
3723 "Send tqp reset cmd error, status =%d\n", ret);
3724 return ret;
3725 }
3726
3727 return 0;
3728 }
3729
3730 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
3731 {
3732 struct hclge_reset_tqp_queue *req;
3733 struct hclge_desc desc;
3734 int ret;
3735
3736 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
3737
3738 req = (struct hclge_reset_tqp_queue *)desc.data;
3739 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3740
3741 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3742 if (ret) {
3743 dev_err(&hdev->pdev->dev,
3744 "Get reset status error, status =%d\n", ret);
3745 return ret;
3746 }
3747
3748 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
3749 }
3750
3751 static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
3752 {
3753 struct hclge_vport *vport = hclge_get_vport(handle);
3754 struct hclge_dev *hdev = vport->back;
3755 int reset_try_times = 0;
3756 int reset_status;
3757 int ret;
3758
3759 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
3760 if (ret) {
3761 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
3762 return;
3763 }
3764
3765 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
3766 if (ret) {
3767 dev_warn(&hdev->pdev->dev,
3768 "Send reset tqp cmd fail, ret = %d\n", ret);
3769 return;
3770 }
3771
3772 reset_try_times = 0;
3773 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
3774 /* Wait for tqp hw reset */
3775 msleep(20);
3776 reset_status = hclge_get_reset_status(hdev, queue_id);
3777 if (reset_status)
3778 break;
3779 }
3780
3781 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
3782 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
3783 return;
3784 }
3785
3786 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
3787 if (ret) {
3788 dev_warn(&hdev->pdev->dev,
3789 "Deassert the soft reset fail, ret = %d\n", ret);
3790 return;
3791 }
3792 }
3793
3794 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
3795 {
3796 struct hclge_vport *vport = hclge_get_vport(handle);
3797 struct hclge_dev *hdev = vport->back;
3798
3799 return hdev->fw_version;
3800 }
3801
3802 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
3803 u32 *rx_en, u32 *tx_en)
3804 {
3805 struct hclge_vport *vport = hclge_get_vport(handle);
3806 struct hclge_dev *hdev = vport->back;
3807
3808 *auto_neg = hclge_get_autoneg(handle);
3809
3810 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
3811 *rx_en = 0;
3812 *tx_en = 0;
3813 return;
3814 }
3815
3816 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
3817 *rx_en = 1;
3818 *tx_en = 0;
3819 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
3820 *tx_en = 1;
3821 *rx_en = 0;
3822 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
3823 *rx_en = 1;
3824 *tx_en = 1;
3825 } else {
3826 *rx_en = 0;
3827 *tx_en = 0;
3828 }
3829 }
3830
3831 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
3832 u8 *auto_neg, u32 *speed, u8 *duplex)
3833 {
3834 struct hclge_vport *vport = hclge_get_vport(handle);
3835 struct hclge_dev *hdev = vport->back;
3836
3837 if (speed)
3838 *speed = hdev->hw.mac.speed;
3839 if (duplex)
3840 *duplex = hdev->hw.mac.duplex;
3841 if (auto_neg)
3842 *auto_neg = hdev->hw.mac.autoneg;
3843 }
3844
3845 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
3846 {
3847 struct hclge_vport *vport = hclge_get_vport(handle);
3848 struct hclge_dev *hdev = vport->back;
3849
3850 if (media_type)
3851 *media_type = hdev->hw.mac.media_type;
3852 }
3853
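/* Report MDI/MDI-X state for an attached copper PHY. The PHY registers
 * are paged: select the MDIX page, read the control and status
 * registers, then restore the copper page. Without a PHY (e.g. fibre
 * media) both values are reported as ETH_TP_MDI_INVALID.
 */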
3854 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
3855 u8 *tp_mdix_ctrl, u8 *tp_mdix)
3856 {
3857 struct hclge_vport *vport = hclge_get_vport(handle);
3858 struct hclge_dev *hdev = vport->back;
3859 struct phy_device *phydev = hdev->hw.mac.phydev;
3860 int mdix_ctrl, mdix, retval, is_resolved;
3861
3862 if (!phydev) {
3863 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3864 *tp_mdix = ETH_TP_MDI_INVALID;
3865 return;
3866 }
3867
3868 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
3869
3870 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
3871 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
3872 HCLGE_PHY_MDIX_CTRL_S);
3873
3874 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
3875 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
3876 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
3877
3878 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
3879
3880 switch (mdix_ctrl) {
3881 case 0x0:
3882 *tp_mdix_ctrl = ETH_TP_MDI;
3883 break;
3884 case 0x1:
3885 *tp_mdix_ctrl = ETH_TP_MDI_X;
3886 break;
3887 case 0x3:
3888 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
3889 break;
3890 default:
3891 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3892 break;
3893 }
3894
3895 if (!is_resolved)
3896 *tp_mdix = ETH_TP_MDI_INVALID;
3897 else if (mdix)
3898 *tp_mdix = ETH_TP_MDI_X;
3899 else
3900 *tp_mdix = ETH_TP_MDI;
3901 }
3902
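/* Attach an hnae3 client (KNIC, UNIC or RoCE) to every vport. For a
 * KNIC client, an already-registered RoCE client is initialized as well
 * when the device advertises RoCE support; a RoCE client registering
 * later is handled by the HNAE3_CLIENT_ROCE case.
 */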
3903 static int hclge_init_client_instance(struct hnae3_client *client,
3904 struct hnae3_ae_dev *ae_dev)
3905 {
3906 struct hclge_dev *hdev = ae_dev->priv;
3907 struct hclge_vport *vport;
3908 int i, ret;
3909
3910 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3911 vport = &hdev->vport[i];
3912
3913 switch (client->type) {
3914 case HNAE3_CLIENT_KNIC:
3916 hdev->nic_client = client;
3917 vport->nic.client = client;
3918 ret = client->ops->init_instance(&vport->nic);
3919 if (ret)
3920 goto err;
3921
3922 if (hdev->roce_client &&
3923 hnae_get_bit(hdev->ae_dev->flag,
3924 HNAE_DEV_SUPPORT_ROCE_B)) {
3925 struct hnae3_client *rc = hdev->roce_client;
3926
3927 ret = hclge_init_roce_base_info(vport);
3928 if (ret)
3929 goto err;
3930
3931 ret = rc->ops->init_instance(&vport->roce);
3932 if (ret)
3933 goto err;
3934 }
3935
3936 break;
3937 case HNAE3_CLIENT_UNIC:
3938 hdev->nic_client = client;
3939 vport->nic.client = client;
3940
3941 ret = client->ops->init_instance(&vport->nic);
3942 if (ret)
3943 goto err;
3944
3945 break;
3946 case HNAE3_CLIENT_ROCE:
3947 if (hnae_get_bit(hdev->ae_dev->flag,
3948 HNAE_DEV_SUPPORT_ROCE_B)) {
3949 hdev->roce_client = client;
3950 vport->roce.client = client;
3951 }
3952
3953 if (hdev->roce_client) {
3954 ret = hclge_init_roce_base_info(vport);
3955 if (ret)
3956 goto err;
3957
3958 ret = client->ops->init_instance(&vport->roce);
3959 if (ret)
3960 goto err;
3961 }
3962 }
3963 }
3964
3965 return 0;
3966 err:
3967 return ret;
3968 }
3969
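/* Detach client instances from the vports; the RoCE instance, which
 * depends on the NIC instance, is torn down before the NIC instance.
 */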
3970 static void hclge_uninit_client_instance(struct hnae3_client *client,
3971 struct hnae3_ae_dev *ae_dev)
3972 {
3973 struct hclge_dev *hdev = ae_dev->priv;
3974 struct hclge_vport *vport;
3975 int i;
3976
3977 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3978 vport = &hdev->vport[i];
3979 if (hdev->roce_client)
3980 hdev->roce_client->ops->uninit_instance(&vport->roce,
3981 0);
3982 if (client->type == HNAE3_CLIENT_ROCE)
3983 return;
3984 if (client->ops->uninit_instance)
3985 client->ops->uninit_instance(&vport->nic, 0);
3986 }
3987 }
3988
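/* PCI bring-up: enable the device, prefer a 64-bit DMA mask with a
 * 32-bit fallback, claim the BARs and map BAR 2, which contains the
 * command-queue and configuration register space.
 */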
3989 static int hclge_pci_init(struct hclge_dev *hdev)
3990 {
3991 struct pci_dev *pdev = hdev->pdev;
3992 struct hclge_hw *hw;
3993 int ret;
3994
3995 ret = pci_enable_device(pdev);
3996 if (ret) {
3997 dev_err(&pdev->dev, "failed to enable PCI device\n");
3998 goto err_no_drvdata;
3999 }
4000
4001 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4002 if (ret) {
4003 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4004 if (ret) {
4005 dev_err(&pdev->dev,
4006 "can't set consistent PCI DMA mask\n");
4007 goto err_disable_device;
4008 }
4009 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4010 }
4011
4012 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4013 if (ret) {
4014 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4015 goto err_disable_device;
4016 }
4017
4018 pci_set_master(pdev);
4019 hw = &hdev->hw;
4020 hw->back = hdev;
4021 hw->io_base = pcim_iomap(pdev, 2, 0);
4022 if (!hw->io_base) {
4023 dev_err(&pdev->dev, "Can't map configuration register space\n");
4024 ret = -ENOMEM;
4025 goto err_clr_master;
4026 }
4027
4028 return 0;
4029 err_clr_master:
4030 pci_clear_master(pdev);
4031 pci_release_regions(pdev);
4032 err_disable_device:
4033 pci_disable_device(pdev);
4034 err_no_drvdata:
4035 pci_set_drvdata(pdev, NULL);
4036
4037 return ret;
4038 }
4039
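/* Undo hclge_pci_init() and free the MSI or MSI-X resources that were
 * set up during device initialization.
 */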
4040 static void hclge_pci_uninit(struct hclge_dev *hdev)
4041 {
4042 struct pci_dev *pdev = hdev->pdev;
4043
4044 if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
4045 pci_disable_msix(pdev);
4046 devm_kfree(&pdev->dev, hdev->msix_entries);
4047 hdev->msix_entries = NULL;
4048 } else {
4049 pci_disable_msi(pdev);
4050 }
4051
4052 pci_clear_master(pdev);
4053 pci_release_regions(pdev);
4054 pci_disable_device(pdev);
4055 }
4056
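/* Main PF initialization, called by the hnae3 framework for every
 * matched PCI device: PCI and command-queue bring-up, capability and
 * configuration discovery, MSI/MSI-X, TQP, vport, MAC, buffer, TSO,
 * RSS, VLAN and TM scheduler setup, and finally the periodic service
 * timer and work item.
 */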
4057 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4058 {
4059 struct pci_dev *pdev = ae_dev->pdev;
4060 const struct pci_device_id *id;
4061 struct hclge_dev *hdev;
4062 int ret;
4063
4064 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4065 if (!hdev) {
4066 ret = -ENOMEM;
4067 goto err_hclge_dev;
4068 }
4069
4070 hdev->flag |= HCLGE_FLAG_USE_MSIX;
4071 hdev->pdev = pdev;
4072 hdev->ae_dev = ae_dev;
4073 ae_dev->priv = hdev;
4074
4075 id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
4076 if (id)
4077 hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
4078
4079 ret = hclge_pci_init(hdev);
4080 if (ret) {
4081 dev_err(&pdev->dev, "PCI init failed\n");
4082 goto err_pci_init;
4083 }
4084
4085 /* Command queue initialize */
4086 ret = hclge_cmd_init(hdev);
4087 if (ret)
4088 goto err_cmd_init;
4089
4090 ret = hclge_get_cap(hdev);
4091 if (ret) {
4092 dev_err(&pdev->dev, "get hw capability error, ret = %d\n",
4093 ret);
4094 goto err_cmd_uninit;
4095 }
4096
4097 ret = hclge_configure(hdev);
4098 if (ret) {
4099 dev_err(&pdev->dev, "Configure dev error, ret = %d\n", ret);
4100 goto err_cmd_uninit;
4101 }
4102
4103 if (hdev->flag & HCLGE_FLAG_USE_MSIX)
4104 ret = hclge_init_msix(hdev);
4105 else
4106 ret = hclge_init_msi(hdev);
4107 if (ret) {
4108 dev_err(&pdev->dev, "Init msix/msi error, ret = %d\n", ret);
4109 goto err_cmd_uninit;
4110 }
4111
4112 ret = hclge_alloc_tqps(hdev);
4113 if (ret) {
4114 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d\n", ret);
4115 goto err_cmd_uninit;
4116 }
4117
4118 ret = hclge_alloc_vport(hdev);
4119 if (ret) {
4120 dev_err(&pdev->dev, "Allocate vport error, ret = %d\n", ret);
4121 goto err_cmd_uninit;
4122 }
4123
4124 ret = hclge_mac_init(hdev);
4125 if (ret) {
4126 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4127 goto err_cmd_uninit;
4128 }
4129 ret = hclge_buffer_alloc(hdev);
4130 if (ret) {
4131 dev_err(&pdev->dev, "Buffer allocate fail, ret = %d\n", ret);
4132 goto err_cmd_uninit;
4133 }
4134
4135 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4136 if (ret) {
4137 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
4138 goto err_cmd_uninit;
4139 }
4140
4141 ret = hclge_rss_init_hw(hdev);
4142 if (ret) {
4143 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
4144 goto err_cmd_uninit;
4145 }
4146
4147 ret = hclge_init_vlan_config(hdev);
4148 if (ret) {
4149 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
4150 goto err_cmd_uninit;
4151 }
4152
4153 ret = hclge_tm_schd_init(hdev);
4154 if (ret) {
4155 dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
4156 goto err_cmd_uninit;
4157 }
4158
4159 setup_timer(&hdev->service_timer, hclge_service_timer,
4160 (unsigned long)hdev);
4161 INIT_WORK(&hdev->service_task, hclge_service_task);
4162
4163 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4164 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4165
4166 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4167 return 0;
4168
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_cmd_init:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);
4173 err_hclge_dev:
4174 return ret;
4175 }
4176
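/* Reverse of hclge_init_ae_dev(): mark the device down, stop the
 * service timer and task, unregister the MDIO bus if a PHY is attached,
 * then shut down the command queue and the PCI function.
 */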
4177 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4178 {
4179 struct hclge_dev *hdev = ae_dev->priv;
4180 struct hclge_mac *mac = &hdev->hw.mac;
4181
4182 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4183
4184 if (IS_ENABLED(CONFIG_PCI_IOV))
4185 hclge_disable_sriov(hdev);
4186
4187 if (hdev->service_timer.data)
4188 del_timer_sync(&hdev->service_timer);
4189 if (hdev->service_task.func)
4190 cancel_work_sync(&hdev->service_task);
4191
4192 if (mac->phydev)
4193 mdiobus_unregister(mac->mdio_bus);
4194
4195 hclge_destroy_cmd_queue(&hdev->hw);
4196 hclge_pci_uninit(hdev);
4197 ae_dev->priv = NULL;
4198 }
4199
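/* Operations exposed to the hnae3 framework; the hns3 ENET and RoCE
 * client drivers reach the PF hardware through this table.
 */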
4200 static const struct hnae3_ae_ops hclge_ops = {
4201 .init_ae_dev = hclge_init_ae_dev,
4202 .uninit_ae_dev = hclge_uninit_ae_dev,
4203 .init_client_instance = hclge_init_client_instance,
4204 .uninit_client_instance = hclge_uninit_client_instance,
4205 .map_ring_to_vector = hclge_map_handle_ring_to_vector,
4206 .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4207 .get_vector = hclge_get_vector,
4208 .set_promisc_mode = hclge_set_promisc_mode,
4209 .start = hclge_ae_start,
4210 .stop = hclge_ae_stop,
4211 .get_status = hclge_get_status,
4212 .get_ksettings_an_result = hclge_get_ksettings_an_result,
4213 .update_speed_duplex_h = hclge_update_speed_duplex_h,
4214 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4215 .get_media_type = hclge_get_media_type,
4216 .get_rss_key_size = hclge_get_rss_key_size,
4217 .get_rss_indir_size = hclge_get_rss_indir_size,
4218 .get_rss = hclge_get_rss,
4219 .set_rss = hclge_set_rss,
4220 .get_tc_size = hclge_get_tc_size,
4221 .get_mac_addr = hclge_get_mac_addr,
4222 .set_mac_addr = hclge_set_mac_addr,
4223 .add_uc_addr = hclge_add_uc_addr,
4224 .rm_uc_addr = hclge_rm_uc_addr,
4225 .add_mc_addr = hclge_add_mc_addr,
4226 .rm_mc_addr = hclge_rm_mc_addr,
4227 .set_autoneg = hclge_set_autoneg,
4228 .get_autoneg = hclge_get_autoneg,
4229 .get_pauseparam = hclge_get_pauseparam,
4230 .set_mtu = hclge_set_mtu,
4231 .reset_queue = hclge_reset_tqp,
4232 .get_stats = hclge_get_stats,
4233 .update_stats = hclge_update_stats,
4234 .get_strings = hclge_get_strings,
4235 .get_sset_count = hclge_get_sset_count,
4236 .get_fw_version = hclge_get_fw_version,
4237 .get_mdix_mode = hclge_get_mdix_mode,
4238 .set_vlan_filter = hclge_set_port_vlan_filter,
4239 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4240 };
4241
4242 static struct hnae3_ae_algo ae_algo = {
4243 .ops = &hclge_ops,
4244 .name = HCLGE_NAME,
4245 .pdev_id_table = ae_algo_pci_tbl,
4246 };
4247
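/* Module entry points: registering the algorithm with the hnae3
 * framework is all that is needed here, as device probing is driven by
 * the framework itself.
 */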
4248 static int hclge_init(void)
4249 {
4250 pr_info("%s is initializing\n", HCLGE_NAME);
4251
4252 return hnae3_register_ae_algo(&ae_algo);
4253 }
4254
4255 static void hclge_exit(void)
4256 {
4257 hnae3_unregister_ae_algo(&ae_algo);
4258 }
4259 module_init(hclge_init);
4260 module_exit(hclge_exit);
4261
4262 MODULE_LICENSE("GPL");
4263 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4264 MODULE_DESCRIPTION("HCLGE Driver");
4265 MODULE_VERSION(HCLGE_MOD_VERSION);