/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

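/* Read the 64-bit IGU/EGU/SSU counters from firmware and accumulate them
 * into hdev->hw_stats.all_64_bit_stats. The first descriptor also carries
 * the command header, so it holds one data word less than the others.
 */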
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

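/* Read the 32-bit counters from firmware and accumulate them into
 * hdev->hw_stats.all_32_bit_stats. The "current buffer" counters are
 * instantaneous values, so they are zeroed before being refreshed rather
 * than accumulated.
 */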
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

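/* Read the MAC statistics block from firmware (HCLGE_OPC_STATS_MAC) and
 * accumulate it into hdev->hw_stats.mac_stats.
 */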
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

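/* Query the per-queue RX and TX packet counters for every TQP owned by
 * this handle and accumulate them into the per-TQP statistics.
 */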
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

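/* Copy the statistics described by an hclge_comm_stats_str table into the
 * ethtool data buffer and return the position just past the copied block.
 */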
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}

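/* ethtool get_sset_count() hook: report the number of self-test items or
 * statistics strings, depending on the requested string set.
 */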
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

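/* Query the TQP, packet-buffer and MSI-X vector resources assigned to
 * this PF from firmware.
 */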
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msix =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

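/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value. */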
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

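/* Apply the configuration read from flash: vport/TQP sizing, MAC address,
 * default speed, TC/PFC limits and the TX scheduling mode.
 */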
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = 1;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TC maps are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
	else
		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

	return ret;
}

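/* Program the minimum and maximum TSO MSS values into the hardware. */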
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced, func_id, ret;
	bool is_pf;

	func_id = vport->vport_id;
	is_pf = (vport->vport_id == 0) ? true : false;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			ret = hclge_map_tqps_to_func(hdev, func_id,
						     hdev->htqp[i].index,
						     alloced, is_pf);
			if (ret)
				return ret;

			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

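/* Set up the kNIC private info for a vport: descriptor and buffer sizes,
 * per-TC queue layout, and the TQPs assigned to it.
 */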
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

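/* Allocate one vport for the PF itself plus one per VMDq instance and
 * requested VF, distribute the TQPs among them (the main vport takes the
 * remainder), and enable SR-IOV when VFs were requested.
 */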
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

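/* Check whether the remaining packet buffer is large enough for the shared
 * RX buffer; if so, set up the shared buffer size and its per-TC thresholds
 * and waterlines.
 */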
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

acf61ecd
YL
1685static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1686 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1687{
d44f9b63 1688 struct hclge_rx_priv_buff_cmd *req;
46a3df9f
S
1689 struct hclge_desc desc;
1690 int ret;
1691 int i;
1692
1693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 1694 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
1695
1696 /* Alloc private buffer TCs */
1697 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1698 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1699
1700 req->buf_num[i] =
1701 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1702 req->buf_num[i] |=
5bca3b94 1703 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
46a3df9f
S
1704 }
1705
b8c8bf47 1706 req->shared_buf =
acf61ecd 1707 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
b8c8bf47
YL
1708 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1709
46a3df9f
S
1710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1711 if (ret) {
1712 dev_err(&hdev->pdev->dev,
1713 "rx private buffer alloc cmd failed %d\n", ret);
1714 return ret;
1715 }
1716
1717 return 0;
1718}
1719
1720#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1721
acf61ecd
YL
1722static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1723 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f
S
1724{
1725 struct hclge_rx_priv_wl_buf *req;
1726 struct hclge_priv_buf *priv;
1727 struct hclge_desc desc[2];
1728 int i, j;
1729 int ret;
1730
1731 for (i = 0; i < 2; i++) {
1732 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1733 false);
1734 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1735
1736 /* The first descriptor set the NEXT bit to 1 */
1737 if (i == 0)
1738 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1739 else
1740 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1741
1742 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
acf61ecd
YL
1743 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1744
1745 priv = &buf_alloc->priv_buf[idx];
46a3df9f
S
1746 req->tc_wl[j].high =
1747 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1748 req->tc_wl[j].high |=
1749 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1750 HCLGE_RX_PRIV_EN_B);
1751 req->tc_wl[j].low =
1752 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1753 req->tc_wl[j].low |=
1754 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1755 HCLGE_RX_PRIV_EN_B);
1756 }
1757 }
1758
1759 /* Send 2 descriptor at one time */
1760 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1761 if (ret) {
1762 dev_err(&hdev->pdev->dev,
1763 "rx private waterline config cmd failed %d\n",
1764 ret);
1765 return ret;
1766 }
1767 return 0;
1768}
1769
acf61ecd
YL
1770static int hclge_common_thrd_config(struct hclge_dev *hdev,
1771 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1772{
acf61ecd 1773 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
1774 struct hclge_rx_com_thrd *req;
1775 struct hclge_desc desc[2];
1776 struct hclge_tc_thrd *tc;
1777 int i, j;
1778 int ret;
1779
1780 for (i = 0; i < 2; i++) {
1781 hclge_cmd_setup_basic_desc(&desc[i],
1782 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1783 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1784
1785 /* The first descriptor set the NEXT bit to 1 */
1786 if (i == 0)
1787 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1788 else
1789 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1790
1791 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1792 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1793
1794 req->com_thrd[j].high =
1795 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1796 req->com_thrd[j].high |=
1797 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1798 HCLGE_RX_PRIV_EN_B);
1799 req->com_thrd[j].low =
1800 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1801 req->com_thrd[j].low |=
1802 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1803 HCLGE_RX_PRIV_EN_B);
1804 }
1805 }
1806
1807 /* Send 2 descriptors at one time */
1808 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1809 if (ret) {
1810 dev_err(&hdev->pdev->dev,
1811 "common threshold config cmd failed %d\n", ret);
1812 return ret;
1813 }
1814 return 0;
1815}
1816
acf61ecd
YL
1817static int hclge_common_wl_config(struct hclge_dev *hdev,
1818 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1819{
acf61ecd 1820 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
1821 struct hclge_rx_com_wl *req;
1822 struct hclge_desc desc;
1823 int ret;
1824
1825 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1826
1827 req = (struct hclge_rx_com_wl *)desc.data;
1828 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1829 req->com_wl.high |=
1830 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1831 HCLGE_RX_PRIV_EN_B);
1832
1833 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1834 req->com_wl.low |=
1835 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1836 HCLGE_RX_PRIV_EN_B);
1837
1838 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1839 if (ret) {
1840 dev_err(&hdev->pdev->dev,
1841 "common waterline config cmd failed %d\n", ret);
1842 return ret;
1843 }
1844
1845 return 0;
1846}
1847
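/* Top-level buffer setup: calculate and program the TX buffers, then the
 * per-TC private RX buffers, and finally the waterlines and thresholds
 * (the per-TC waterlines and common thresholds are only programmed when
 * the device supports DCB). The scratch hclge_pkt_buf_alloc is freed on
 * every exit path via the out label.
 */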
1848int hclge_buffer_alloc(struct hclge_dev *hdev)
1849{
acf61ecd 1850 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
1851 int ret;
1852
acf61ecd
YL
1853 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1854 if (!pkt_buf)
46a3df9f
S
1855 return -ENOMEM;
1856
acf61ecd 1857 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
1858 if (ret) {
1859 dev_err(&hdev->pdev->dev,
1860 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 1861 goto out;
9ffe79a9
YL
1862 }
1863
acf61ecd 1864 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
1865 if (ret) {
1866 dev_err(&hdev->pdev->dev,
1867 "could not alloc tx buffers %d\n", ret);
acf61ecd 1868 goto out;
46a3df9f
S
1869 }
1870
acf61ecd 1871 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
1872 if (ret) {
1873 dev_err(&hdev->pdev->dev,
1874 "could not calc rx priv buffer size for all TCs %d\n",
1875 ret);
acf61ecd 1876 goto out;
46a3df9f
S
1877 }
1878
acf61ecd 1879 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
1880 if (ret) {
1881 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1882 ret);
acf61ecd 1883 goto out;
46a3df9f
S
1884 }
1885
2daf4a65 1886 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 1887 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
1888 if (ret) {
1889 dev_err(&hdev->pdev->dev,
1890 "could not configure rx private waterline %d\n",
1891 ret);
acf61ecd 1892 goto out;
2daf4a65 1893 }
46a3df9f 1894
acf61ecd 1895 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
1896 if (ret) {
1897 dev_err(&hdev->pdev->dev,
1898 "could not configure common threshold %d\n",
1899 ret);
acf61ecd 1900 goto out;
2daf4a65 1901 }
46a3df9f
S
1902 }
1903
acf61ecd
YL
1904 ret = hclge_common_wl_config(hdev, pkt_buf);
1905 if (ret)
46a3df9f
S
1906 dev_err(&hdev->pdev->dev,
1907 "could not configure common waterline %d\n", ret);
46a3df9f 1908
acf61ecd
YL
1909out:
1910 kfree(pkt_buf);
1911 return ret;
46a3df9f
S
1912}
1913
1914static int hclge_init_roce_base_info(struct hclge_vport *vport)
1915{
1916 struct hnae3_handle *roce = &vport->roce;
1917 struct hnae3_handle *nic = &vport->nic;
1918
1919 roce->rinfo.num_vectors = vport->back->num_roce_msix;
1920
1921 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1922 vport->back->num_msi_left == 0)
1923 return -EINVAL;
1924
1925 roce->rinfo.base_vector = vport->back->roce_base_vector;
1926
1927 roce->rinfo.netdev = nic->kinfo.netdev;
1928 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1929
1930 roce->pdev = nic->pdev;
1931 roce->ae_algo = nic->ae_algo;
1932 roce->numa_node_mask = nic->numa_node_mask;
1933
1934 return 0;
1935}
1936
1937static int hclge_init_msix(struct hclge_dev *hdev)
1938{
1939 struct pci_dev *pdev = hdev->pdev;
1940 int ret, i;
1941
1942 hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
1943 sizeof(struct msix_entry),
1944 GFP_KERNEL);
1945 if (!hdev->msix_entries)
1946 return -ENOMEM;
1947
1948 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1949 sizeof(u16), GFP_KERNEL);
1950 if (!hdev->vector_status)
1951 return -ENOMEM;
1952
1953 for (i = 0; i < hdev->num_msi; i++) {
1954 hdev->msix_entries[i].entry = i;
1955 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1956 }
1957
1958 hdev->num_msi_left = hdev->num_msi;
1959 hdev->base_msi_vector = hdev->pdev->irq;
1960 hdev->roce_base_vector = hdev->base_msi_vector +
1961 HCLGE_ROCE_VECTOR_OFFSET;
1962
1963 ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
1964 hdev->num_msi, hdev->num_msi);
1965 if (ret < 0) {
1966 dev_info(&hdev->pdev->dev,
1967 "MSI-X vector alloc failed: %d\n", ret);
1968 return ret;
1969 }
1970
1971 return 0;
1972}
1973
1974static int hclge_init_msi(struct hclge_dev *hdev)
1975{
1976 struct pci_dev *pdev = hdev->pdev;
1977 int vectors;
1978 int i;
1979
1980 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1981 sizeof(u16), GFP_KERNEL);
1982 if (!hdev->vector_status)
1983 return -ENOMEM;
1984
1985 for (i = 0; i < hdev->num_msi; i++)
1986 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1987
1988 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
1989 if (vectors < 0) {
1990 dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
1991 return -EINVAL;
1992 }
1993 hdev->num_msi = vectors;
1994 hdev->num_msi_left = vectors;
1995 hdev->base_msi_vector = pdev->irq;
1996 hdev->roce_base_vector = hdev->base_msi_vector +
1997 HCLGE_ROCE_VECTOR_OFFSET;
1998
1999 return 0;
2000}
2001
2002static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2003{
2004 struct hclge_mac *mac = &hdev->hw.mac;
2005
2006 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2007 mac->duplex = (u8)duplex;
2008 else
2009 mac->duplex = HCLGE_MAC_FULL;
2010
2011 mac->speed = speed;
2012}
2013
2014int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2015{
d44f9b63 2016 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2017 struct hclge_desc desc;
2018 int ret;
2019
d44f9b63 2020 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2021
2022 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2023
2024 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2025
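 /* Map the requested link speed onto the firmware encoding of the
  * HCLGE_CFG_SPEED field (1G = 0, 10G = 1, ..., 10M = 6, 100M = 7).
  */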
2026 switch (speed) {
2027 case HCLGE_MAC_SPEED_10M:
2028 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2029 HCLGE_CFG_SPEED_S, 6);
2030 break;
2031 case HCLGE_MAC_SPEED_100M:
2032 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2033 HCLGE_CFG_SPEED_S, 7);
2034 break;
2035 case HCLGE_MAC_SPEED_1G:
2036 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2037 HCLGE_CFG_SPEED_S, 0);
2038 break;
2039 case HCLGE_MAC_SPEED_10G:
2040 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2041 HCLGE_CFG_SPEED_S, 1);
2042 break;
2043 case HCLGE_MAC_SPEED_25G:
2044 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2045 HCLGE_CFG_SPEED_S, 2);
2046 break;
2047 case HCLGE_MAC_SPEED_40G:
2048 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2049 HCLGE_CFG_SPEED_S, 3);
2050 break;
2051 case HCLGE_MAC_SPEED_50G:
2052 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2053 HCLGE_CFG_SPEED_S, 4);
2054 break;
2055 case HCLGE_MAC_SPEED_100G:
2056 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2057 HCLGE_CFG_SPEED_S, 5);
2058 break;
2059 default:
d7629e74 2060 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2061 return -EINVAL;
2062 }
2063
2064 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2065 1);
2066
2067 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2068 if (ret) {
2069 dev_err(&hdev->pdev->dev,
2070 "mac speed/duplex config cmd failed %d.\n", ret);
2071 return ret;
2072 }
2073
2074 hclge_check_speed_dup(hdev, duplex, speed);
2075
2076 return 0;
2077}
2078
2079static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2080 u8 duplex)
2081{
2082 struct hclge_vport *vport = hclge_get_vport(handle);
2083 struct hclge_dev *hdev = vport->back;
2084
2085 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2086}
2087
2088static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2089 u8 *duplex)
2090{
d44f9b63 2091 struct hclge_query_an_speed_dup_cmd *req;
46a3df9f
S
2092 struct hclge_desc desc;
2093 int speed_tmp;
2094 int ret;
2095
d44f9b63 2096 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
46a3df9f
S
2097
2098 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2099 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2100 if (ret) {
2101 dev_err(&hdev->pdev->dev,
2102 "mac speed/autoneg/duplex query cmd failed %d\n",
2103 ret);
2104 return ret;
2105 }
2106
2107 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2108 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2109 HCLGE_QUERY_SPEED_S);
2110
2111 ret = hclge_parse_speed(speed_tmp, speed);
2112 if (ret) {
2113 dev_err(&hdev->pdev->dev,
2114 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2115 return -EIO;
2116 }
2117
2118 return 0;
2119}
2120
2121static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2122{
2123 struct hclge_mac *mac = &hdev->hw.mac;
d44f9b63 2124 struct hclge_query_an_speed_dup_cmd *req;
46a3df9f
S
2125 struct hclge_desc desc;
2126 int ret;
2127
d44f9b63 2128 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
46a3df9f
S
2129
2130 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2131 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2132 if (ret) {
2133 dev_err(&hdev->pdev->dev,
2134 "autoneg result query cmd failed %d.\n", ret);
2135 return ret;
2136 }
2137
2138 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2139
2140 return 0;
2141}
2142
2143static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2144{
d44f9b63 2145 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2146 struct hclge_desc desc;
a90bb9a5 2147 u32 flag = 0;
46a3df9f
S
2148 int ret;
2149
2150 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2151
d44f9b63 2152 req = (struct hclge_config_auto_neg_cmd *)desc.data;
a90bb9a5
YL
2153 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2154 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2155
2156 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2157 if (ret) {
2158 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2159 ret);
2160 return ret;
2161 }
2162
2163 return 0;
2164}
2165
2166static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2167{
2168 struct hclge_vport *vport = hclge_get_vport(handle);
2169 struct hclge_dev *hdev = vport->back;
2170
2171 return hclge_set_autoneg_en(hdev, enable);
2172}
2173
2174static int hclge_get_autoneg(struct hnae3_handle *handle)
2175{
2176 struct hclge_vport *vport = hclge_get_vport(handle);
2177 struct hclge_dev *hdev = vport->back;
2178
2179 hclge_query_autoneg_result(hdev);
2180
2181 return hdev->hw.mac.autoneg;
2182}
2183
2184static int hclge_mac_init(struct hclge_dev *hdev)
2185{
2186 struct hclge_mac *mac = &hdev->hw.mac;
2187 int ret;
2188
2189 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2190 if (ret) {
2191 dev_err(&hdev->pdev->dev,
2192 "Config mac speed dup fail ret=%d\n", ret);
2193 return ret;
2194 }
2195
2196 mac->link = 0;
2197
2198 ret = hclge_mac_mdio_config(hdev);
2199 if (ret) {
2200 dev_warn(&hdev->pdev->dev,
2201 "mdio config fail ret=%d\n", ret);
2202 return ret;
2203 }
2204
2205 /* Initialize the MTA table work mode */
2206 hdev->accept_mta_mc = true;
2207 hdev->enable_mta = true;
2208 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2209
2210 ret = hclge_set_mta_filter_mode(hdev,
2211 hdev->mta_mac_sel_type,
2212 hdev->enable_mta);
2213 if (ret) {
2214 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2215 ret);
2216 return ret;
2217 }
2218
2219 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2220}
2221
2222static void hclge_task_schedule(struct hclge_dev *hdev)
2223{
2224 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2225 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2226 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2227 (void)schedule_work(&hdev->service_task);
2228}
2229
2230static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2231{
d44f9b63 2232 struct hclge_link_status_cmd *req;
46a3df9f
S
2233 struct hclge_desc desc;
2234 int link_status;
2235 int ret;
2236
2237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2238 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2239 if (ret) {
2240 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2241 ret);
2242 return ret;
2243 }
2244
d44f9b63 2245 req = (struct hclge_link_status_cmd *)desc.data;
46a3df9f
S
2246 link_status = req->status & HCLGE_LINK_STATUS;
2247
2248 return !!link_status;
2249}
2250
2251static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2252{
2253 int mac_state;
2254 int link_stat;
2255
2256 mac_state = hclge_get_mac_link_status(hdev);
2257
2258 if (hdev->hw.mac.phydev) {
2259 if (!genphy_read_status(hdev->hw.mac.phydev))
2260 link_stat = mac_state &
2261 hdev->hw.mac.phydev->link;
2262 else
2263 link_stat = 0;
2264
2265 } else {
2266 link_stat = mac_state;
2267 }
2268
2269 return !!link_stat;
2270}
2271
2272static void hclge_update_link_status(struct hclge_dev *hdev)
2273{
2274 struct hnae3_client *client = hdev->nic_client;
2275 struct hnae3_handle *handle;
2276 int state;
2277 int i;
2278
2279 if (!client)
2280 return;
2281 state = hclge_get_mac_phy_link(hdev);
2282 if (state != hdev->hw.mac.link) {
2283 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2284 handle = &hdev->vport[i].nic;
2285 client->ops->link_status_change(handle, state);
2286 }
2287 hdev->hw.mac.link = state;
2288 }
2289}
2290
2291static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2292{
2293 struct hclge_mac mac = hdev->hw.mac;
2294 u8 duplex;
2295 int speed;
2296 int ret;
2297
 2298 /* get the speed and duplex as the autoneg result from the mac cmd when
 2299 * the phy doesn't exist.
2300 */
2301 if (mac.phydev)
2302 return 0;
2303
 2304 /* update mac->autoneg. */
2305 ret = hclge_query_autoneg_result(hdev);
2306 if (ret) {
2307 dev_err(&hdev->pdev->dev,
2308 "autoneg result query failed %d\n", ret);
2309 return ret;
2310 }
2311
2312 if (!mac.autoneg)
2313 return 0;
2314
2315 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2316 if (ret) {
2317 dev_err(&hdev->pdev->dev,
2318 "mac autoneg/speed/duplex query failed %d\n", ret);
2319 return ret;
2320 }
2321
2322 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2323 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2324 if (ret) {
2325 dev_err(&hdev->pdev->dev,
2326 "mac speed/duplex config failed %d\n", ret);
2327 return ret;
2328 }
2329 }
2330
2331 return 0;
2332}
2333
2334static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2335{
2336 struct hclge_vport *vport = hclge_get_vport(handle);
2337 struct hclge_dev *hdev = vport->back;
2338
2339 return hclge_update_speed_duplex(hdev);
2340}
2341
2342static int hclge_get_status(struct hnae3_handle *handle)
2343{
2344 struct hclge_vport *vport = hclge_get_vport(handle);
2345 struct hclge_dev *hdev = vport->back;
2346
2347 hclge_update_link_status(hdev);
2348
2349 return hdev->hw.mac.link;
2350}
2351
d039ef68 2352static void hclge_service_timer(struct timer_list *t)
46a3df9f 2353{
d039ef68 2354 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
46a3df9f 2355
d039ef68 2356 mod_timer(&hdev->service_timer, jiffies + HZ);
46a3df9f
S
2357 hclge_task_schedule(hdev);
2358}
2359
2360static void hclge_service_complete(struct hclge_dev *hdev)
2361{
2362 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2363
2364 /* Flush memory before next watchdog */
2365 smp_mb__before_atomic();
2366 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2367}
2368
2369static void hclge_service_task(struct work_struct *work)
2370{
2371 struct hclge_dev *hdev =
2372 container_of(work, struct hclge_dev, service_task);
2373
2374 hclge_update_speed_duplex(hdev);
2375 hclge_update_link_status(hdev);
2376 hclge_update_stats_for_all(hdev);
2377 hclge_service_complete(hdev);
2378}
2379
2380static void hclge_disable_sriov(struct hclge_dev *hdev)
2381{
2a32ca13
AB
2382 /* If our VFs are assigned we cannot shut down SR-IOV
2383 * without causing issues, so just leave the hardware
2384 * available but disabled
2385 */
2386 if (pci_vfs_assigned(hdev->pdev)) {
2387 dev_warn(&hdev->pdev->dev,
2388 "disabling driver while VFs are assigned\n");
2389 return;
2390 }
46a3df9f 2391
2a32ca13 2392 pci_disable_sriov(hdev->pdev);
46a3df9f
S
2393}
2394
2395struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2396{
2397 /* VF handle has no client */
2398 if (!handle->client)
2399 return container_of(handle, struct hclge_vport, nic);
2400 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2401 return container_of(handle, struct hclge_vport, roce);
2402 else
2403 return container_of(handle, struct hclge_vport, nic);
2404}
2405
2406static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2407 struct hnae3_vector_info *vector_info)
2408{
2409 struct hclge_vport *vport = hclge_get_vport(handle);
2410 struct hnae3_vector_info *vector = vector_info;
2411 struct hclge_dev *hdev = vport->back;
2412 int alloc = 0;
2413 int i, j;
2414
2415 vector_num = min(hdev->num_msi_left, vector_num);
2416
2417 for (j = 0; j < vector_num; j++) {
2418 for (i = 1; i < hdev->num_msi; i++) {
2419 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2420 vector->vector = pci_irq_vector(hdev->pdev, i);
2421 vector->io_addr = hdev->hw.io_base +
2422 HCLGE_VECTOR_REG_BASE +
2423 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2424 vport->vport_id *
2425 HCLGE_VECTOR_VF_OFFSET;
2426 hdev->vector_status[i] = vport->vport_id;
2427
2428 vector++;
2429 alloc++;
2430
2431 break;
2432 }
2433 }
2434 }
2435 hdev->num_msi_left -= alloc;
2436 hdev->num_msi_used += alloc;
2437
2438 return alloc;
2439}
2440
2441static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2442{
2443 int i;
2444
2445 for (i = 0; i < hdev->num_msi; i++) {
2446 if (hdev->msix_entries) {
2447 if (vector == hdev->msix_entries[i].vector)
2448 return i;
2449 } else {
2450 if (vector == (hdev->base_msi_vector + i))
2451 return i;
2452 }
2453 }
2454 return -EINVAL;
2455}
2456
2457static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2458{
2459 return HCLGE_RSS_KEY_SIZE;
2460}
2461
2462static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2463{
2464 return HCLGE_RSS_IND_TBL_SIZE;
2465}
2466
2467static int hclge_get_rss_algo(struct hclge_dev *hdev)
2468{
d44f9b63 2469 struct hclge_rss_config_cmd *req;
46a3df9f
S
2470 struct hclge_desc desc;
2471 int rss_hash_algo;
2472 int ret;
2473
2474 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2475
2476 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2477 if (ret) {
2478 dev_err(&hdev->pdev->dev,
2479 "Get link status error, status =%d\n", ret);
2480 return ret;
2481 }
2482
d44f9b63 2483 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f
S
2484 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2485
2486 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2487 return ETH_RSS_HASH_TOP;
2488
2489 return -EINVAL;
2490}
2491
2492static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2493 const u8 hfunc, const u8 *key)
2494{
d44f9b63 2495 struct hclge_rss_config_cmd *req;
46a3df9f
S
2496 struct hclge_desc desc;
2497 int key_offset;
2498 int key_size;
2499 int ret;
2500
d44f9b63 2501 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f
S
2502
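 /* The hash key is programmed in three chunks of HCLGE_RSS_HASH_KEY_NUM
  * bytes; hash_config carries both the algorithm and the chunk offset, and
  * the last chunk holds whatever bytes of HCLGE_RSS_KEY_SIZE remain.
  */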
2503 for (key_offset = 0; key_offset < 3; key_offset++) {
2504 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2505 false);
2506
2507 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2508 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2509
2510 if (key_offset == 2)
2511 key_size =
2512 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2513 else
2514 key_size = HCLGE_RSS_HASH_KEY_NUM;
2515
2516 memcpy(req->hash_key,
2517 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2518
2519 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2520 if (ret) {
2521 dev_err(&hdev->pdev->dev,
2522 "Configure RSS config fail, status = %d\n",
2523 ret);
2524 return ret;
2525 }
2526 }
2527 return 0;
2528}
2529
2530static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2531{
d44f9b63 2532 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
2533 struct hclge_desc desc;
2534 int i, j;
2535 int ret;
2536
d44f9b63 2537 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
2538
2539 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2540 hclge_cmd_setup_basic_desc
2541 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2542
a90bb9a5
YL
2543 req->start_table_index =
2544 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2545 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
2546
2547 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2548 req->rss_result[j] =
2549 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2550
2551 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2552 if (ret) {
2553 dev_err(&hdev->pdev->dev,
2554 "Configure rss indir table fail,status = %d\n",
2555 ret);
2556 return ret;
2557 }
2558 }
2559 return 0;
2560}
2561
2562static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2563 u16 *tc_size, u16 *tc_offset)
2564{
d44f9b63 2565 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
2566 struct hclge_desc desc;
2567 int ret;
2568 int i;
2569
2570 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 2571 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
2572
2573 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
2574 u16 mode = 0;
2575
2576 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2577 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
46a3df9f 2578 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
a90bb9a5 2579 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
46a3df9f 2580 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
2581
2582 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
2583 }
2584
2585 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2586 if (ret) {
2587 dev_err(&hdev->pdev->dev,
2588 "Configure rss tc mode fail, status = %d\n", ret);
2589 return ret;
2590 }
2591
2592 return 0;
2593}
2594
2595static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2596{
d44f9b63 2597 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
2598 struct hclge_desc desc;
2599 int ret;
2600
2601 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2602
d44f9b63 2603 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
46a3df9f
S
2604 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2605 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2606 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2607 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2608 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2609 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2610 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2611 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2612 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2613 if (ret) {
2614 dev_err(&hdev->pdev->dev,
2615 "Configure rss input fail, status = %d\n", ret);
2616 return ret;
2617 }
2618
2619 return 0;
2620}
2621
2622static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2623 u8 *key, u8 *hfunc)
2624{
2625 struct hclge_vport *vport = hclge_get_vport(handle);
2626 struct hclge_dev *hdev = vport->back;
2627 int i;
2628
2629 /* Get hash algorithm */
2630 if (hfunc)
2631 *hfunc = hclge_get_rss_algo(hdev);
2632
2633 /* Get the RSS Key required by the user */
2634 if (key)
2635 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2636
2637 /* Get indirect table */
2638 if (indir)
2639 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2640 indir[i] = vport->rss_indirection_tbl[i];
2641
2642 return 0;
2643}
2644
2645static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2646 const u8 *key, const u8 hfunc)
2647{
2648 struct hclge_vport *vport = hclge_get_vport(handle);
2649 struct hclge_dev *hdev = vport->back;
2650 u8 hash_algo;
2651 int ret, i;
2652
 2653 /* Set the RSS Hash Key if specified by the user */
2654 if (key) {
 2655 /* Update the shadow RSS key with the user specified key */
2656 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2657
2658 if (hfunc == ETH_RSS_HASH_TOP ||
2659 hfunc == ETH_RSS_HASH_NO_CHANGE)
2660 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2661 else
2662 return -EINVAL;
2663 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2664 if (ret)
2665 return ret;
2666 }
2667
2668 /* Update the shadow RSS table with user specified qids */
2669 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2670 vport->rss_indirection_tbl[i] = indir[i];
2671
2672 /* Update the hardware */
2673 ret = hclge_set_rss_indir_table(hdev, indir);
2674 return ret;
2675}
2676
f7db940a
L
2677static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
2678{
2679 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
2680
2681 if (nfc->data & RXH_L4_B_2_3)
2682 hash_sets |= HCLGE_D_PORT_BIT;
2683 else
2684 hash_sets &= ~HCLGE_D_PORT_BIT;
2685
2686 if (nfc->data & RXH_IP_SRC)
2687 hash_sets |= HCLGE_S_IP_BIT;
2688 else
2689 hash_sets &= ~HCLGE_S_IP_BIT;
2690
2691 if (nfc->data & RXH_IP_DST)
2692 hash_sets |= HCLGE_D_IP_BIT;
2693 else
2694 hash_sets &= ~HCLGE_D_IP_BIT;
2695
2696 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
2697 hash_sets |= HCLGE_V_TAG_BIT;
2698
2699 return hash_sets;
2700}
2701
2702static int hclge_set_rss_tuple(struct hnae3_handle *handle,
2703 struct ethtool_rxnfc *nfc)
2704{
2705 struct hclge_vport *vport = hclge_get_vport(handle);
2706 struct hclge_dev *hdev = vport->back;
2707 struct hclge_rss_input_tuple_cmd *req;
2708 struct hclge_desc desc;
2709 u8 tuple_sets;
2710 int ret;
2711
2712 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2713 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2714 return -EINVAL;
2715
2716 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2717 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
2718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2719 if (ret) {
2720 dev_err(&hdev->pdev->dev,
2721 "Read rss tuple fail, status = %d\n", ret);
2722 return ret;
2723 }
2724
2725 hclge_cmd_reuse_desc(&desc, false);
2726
2727 tuple_sets = hclge_get_rss_hash_bits(nfc);
2728 switch (nfc->flow_type) {
2729 case TCP_V4_FLOW:
2730 req->ipv4_tcp_en = tuple_sets;
2731 break;
2732 case TCP_V6_FLOW:
2733 req->ipv6_tcp_en = tuple_sets;
2734 break;
2735 case UDP_V4_FLOW:
2736 req->ipv4_udp_en = tuple_sets;
2737 break;
2738 case UDP_V6_FLOW:
2739 req->ipv6_udp_en = tuple_sets;
2740 break;
2741 case SCTP_V4_FLOW:
2742 req->ipv4_sctp_en = tuple_sets;
2743 break;
2744 case SCTP_V6_FLOW:
2745 if ((nfc->data & RXH_L4_B_0_1) ||
2746 (nfc->data & RXH_L4_B_2_3))
2747 return -EINVAL;
2748
2749 req->ipv6_sctp_en = tuple_sets;
2750 break;
2751 case IPV4_FLOW:
2752 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2753 break;
2754 case IPV6_FLOW:
2755 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2756 break;
2757 default:
2758 return -EINVAL;
2759 }
2760
2761 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2762 if (ret)
2763 dev_err(&hdev->pdev->dev,
2764 "Set rss tuple fail, status = %d\n", ret);
2765
2766 return ret;
2767}
2768
07d29954
L
2769static int hclge_get_rss_tuple(struct hnae3_handle *handle,
2770 struct ethtool_rxnfc *nfc)
2771{
2772 struct hclge_vport *vport = hclge_get_vport(handle);
2773 struct hclge_dev *hdev = vport->back;
2774 struct hclge_rss_input_tuple_cmd *req;
2775 struct hclge_desc desc;
2776 u8 tuple_sets;
2777 int ret;
2778
2779 nfc->data = 0;
2780
2781 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2782 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
2783 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2784 if (ret) {
2785 dev_err(&hdev->pdev->dev,
2786 "Read rss tuple fail, status = %d\n", ret);
2787 return ret;
2788 }
2789
2790 switch (nfc->flow_type) {
2791 case TCP_V4_FLOW:
2792 tuple_sets = req->ipv4_tcp_en;
2793 break;
2794 case UDP_V4_FLOW:
2795 tuple_sets = req->ipv4_udp_en;
2796 break;
2797 case TCP_V6_FLOW:
2798 tuple_sets = req->ipv6_tcp_en;
2799 break;
2800 case UDP_V6_FLOW:
2801 tuple_sets = req->ipv6_udp_en;
2802 break;
2803 case SCTP_V4_FLOW:
2804 tuple_sets = req->ipv4_sctp_en;
2805 break;
2806 case SCTP_V6_FLOW:
2807 tuple_sets = req->ipv6_sctp_en;
2808 break;
2809 case IPV4_FLOW:
2810 case IPV6_FLOW:
2811 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
2812 break;
2813 default:
2814 return -EINVAL;
2815 }
2816
2817 if (!tuple_sets)
2818 return 0;
2819
2820 if (tuple_sets & HCLGE_D_PORT_BIT)
2821 nfc->data |= RXH_L4_B_2_3;
2822 if (tuple_sets & HCLGE_S_PORT_BIT)
2823 nfc->data |= RXH_L4_B_0_1;
2824 if (tuple_sets & HCLGE_D_IP_BIT)
2825 nfc->data |= RXH_IP_DST;
2826 if (tuple_sets & HCLGE_S_IP_BIT)
2827 nfc->data |= RXH_IP_SRC;
2828
2829 return 0;
2830}
2831
46a3df9f
S
2832static int hclge_get_tc_size(struct hnae3_handle *handle)
2833{
2834 struct hclge_vport *vport = hclge_get_vport(handle);
2835 struct hclge_dev *hdev = vport->back;
2836
2837 return hdev->rss_size_max;
2838}
2839
77f255c1 2840int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f
S
2841{
2842 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2843 struct hclge_vport *vport = hdev->vport;
2844 u16 tc_offset[HCLGE_MAX_TC_NUM];
2845 u8 rss_key[HCLGE_RSS_KEY_SIZE];
2846 u16 tc_valid[HCLGE_MAX_TC_NUM];
2847 u16 tc_size[HCLGE_MAX_TC_NUM];
2848 u32 *rss_indir = NULL;
68ece54e 2849 u16 rss_size = 0, roundup_size;
46a3df9f
S
2850 const u8 *key;
2851 int i, ret, j;
2852
2853 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
2854 if (!rss_indir)
2855 return -ENOMEM;
2856
2857 /* Get default RSS key */
2858 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
2859
2860 /* Initialize RSS indirect table for each vport */
2861 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
2862 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
2863 vport[j].rss_indirection_tbl[i] =
68ece54e
YL
2864 i % vport[j].alloc_rss_size;
2865
2866 /* vport 0 is for PF */
2867 if (j != 0)
2868 continue;
2869
2870 rss_size = vport[j].alloc_rss_size;
46a3df9f
S
2871 rss_indir[i] = vport[j].rss_indirection_tbl[i];
2872 }
2873 }
2874 ret = hclge_set_rss_indir_table(hdev, rss_indir);
2875 if (ret)
2876 goto err;
2877
2878 key = rss_key;
2879 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
2880 if (ret)
2881 goto err;
2882
2883 ret = hclge_set_rss_input_tuple(hdev);
2884 if (ret)
2885 goto err;
2886
68ece54e
YL
 2887 /* Each TC has the same queue size, and the tc_size set to hardware is
 2888 * the log2 of the roundup power of two of rss_size; the actual queue
 2889 * size is limited by the indirection table.
2890 */
2891 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
2892 dev_err(&hdev->pdev->dev,
2893 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
2894 rss_size);
81359617
CJ
2895 ret = -EINVAL;
2896 goto err;
68ece54e
YL
2897 }
2898
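 /* For example, with a (hypothetical) rss_size of 24, roundup_pow_of_two()
  * yields 32 and the tc_size written to hardware is ilog2(32) = 5.
  */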
2899 roundup_size = roundup_pow_of_two(rss_size);
2900 roundup_size = ilog2(roundup_size);
2901
46a3df9f 2902 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 2903 tc_valid[i] = 0;
46a3df9f 2904
68ece54e
YL
2905 if (!(hdev->hw_tc_map & BIT(i)))
2906 continue;
2907
2908 tc_valid[i] = 1;
2909 tc_size[i] = roundup_size;
2910 tc_offset[i] = rss_size * i;
46a3df9f 2911 }
68ece54e 2912
46a3df9f
S
2913 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
2914
2915err:
2916 kfree(rss_indir);
2917
2918 return ret;
2919}
2920
2921int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
2922 struct hnae3_ring_chain_node *ring_chain)
2923{
2924 struct hclge_dev *hdev = vport->back;
d44f9b63 2925 struct hclge_ctrl_vector_chain_cmd *req;
46a3df9f
S
2926 struct hnae3_ring_chain_node *node;
2927 struct hclge_desc desc;
2928 int ret;
2929 int i;
2930
2931 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
2932
d44f9b63 2933 req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
46a3df9f
S
2934 req->int_vector_id = vector_id;
2935
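 /* Walk the ring chain, filling one mapping entry per ring; the command is
  * flushed each time HCLGE_VECTOR_ELEMENTS_PER_CMD entries are filled, and
  * any remainder is sent after the loop.
  */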
2936 i = 0;
2937 for (node = ring_chain; node; node = node->next) {
a90bb9a5
YL
2938 u16 type_and_id = 0;
2939
2940 hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
46a3df9f 2941 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
a90bb9a5
YL
2942 hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
2943 node->tqp_index);
2944 hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
0305b443
L
2945 HCLGE_INT_GL_IDX_S,
2946 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
a90bb9a5 2947 req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
0305b443 2948 req->vfid = vport->vport_id;
46a3df9f
S
2949
2950 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2951 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2952
2953 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2954 if (ret) {
2955 dev_err(&hdev->pdev->dev,
2956 "Map TQP fail, status is %d.\n",
2957 ret);
2958 return ret;
2959 }
2960 i = 0;
2961
2962 hclge_cmd_setup_basic_desc(&desc,
2963 HCLGE_OPC_ADD_RING_TO_VECTOR,
2964 false);
2965 req->int_vector_id = vector_id;
2966 }
2967 }
2968
2969 if (i > 0) {
2970 req->int_cause_num = i;
2971
2972 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2973 if (ret) {
2974 dev_err(&hdev->pdev->dev,
2975 "Map TQP fail, status is %d.\n", ret);
2976 return ret;
2977 }
2978 }
2979
2980 return 0;
2981}
2982
1db9b1bf
YL
2983static int hclge_map_handle_ring_to_vector(
2984 struct hnae3_handle *handle, int vector,
2985 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
2986{
2987 struct hclge_vport *vport = hclge_get_vport(handle);
2988 struct hclge_dev *hdev = vport->back;
2989 int vector_id;
2990
2991 vector_id = hclge_get_vector_index(hdev, vector);
2992 if (vector_id < 0) {
2993 dev_err(&hdev->pdev->dev,
2994 "Get vector index fail. ret =%d\n", vector_id);
2995 return vector_id;
2996 }
2997
2998 return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
2999}
3000
3001static int hclge_unmap_ring_from_vector(
3002 struct hnae3_handle *handle, int vector,
3003 struct hnae3_ring_chain_node *ring_chain)
3004{
3005 struct hclge_vport *vport = hclge_get_vport(handle);
3006 struct hclge_dev *hdev = vport->back;
d44f9b63 3007 struct hclge_ctrl_vector_chain_cmd *req;
46a3df9f
S
3008 struct hnae3_ring_chain_node *node;
3009 struct hclge_desc desc;
3010 int i, vector_id;
3011 int ret;
3012
3013 vector_id = hclge_get_vector_index(hdev, vector);
3014 if (vector_id < 0) {
3015 dev_err(&handle->pdev->dev,
3016 "Get vector index fail. ret =%d\n", vector_id);
3017 return vector_id;
3018 }
3019
3020 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
3021
d44f9b63 3022 req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
46a3df9f
S
3023 req->int_vector_id = vector_id;
3024
3025 i = 0;
3026 for (node = ring_chain; node; node = node->next) {
a90bb9a5
YL
3027 u16 type_and_id = 0;
3028
3029 hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
46a3df9f 3030 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
a90bb9a5
YL
3031 hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
3032 node->tqp_index);
3033 hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
0305b443
L
3034 HCLGE_INT_GL_IDX_S,
3035 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
46a3df9f 3036
a90bb9a5 3037 req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
0305b443 3038 req->vfid = vport->vport_id;
46a3df9f
S
3039
3040 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3041 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3042
3043 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3044 if (ret) {
3045 dev_err(&hdev->pdev->dev,
3046 "Unmap TQP fail, status is %d.\n",
3047 ret);
3048 return ret;
3049 }
3050 i = 0;
3051 hclge_cmd_setup_basic_desc(&desc,
c5b1b975 3052 HCLGE_OPC_DEL_RING_TO_VECTOR,
46a3df9f
S
3053 false);
3054 req->int_vector_id = vector_id;
3055 }
3056 }
3057
3058 if (i > 0) {
3059 req->int_cause_num = i;
3060
3061 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3062 if (ret) {
3063 dev_err(&hdev->pdev->dev,
3064 "Unmap TQP fail, status is %d.\n", ret);
3065 return ret;
3066 }
3067 }
3068
3069 return 0;
3070}
3071
3072int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3073 struct hclge_promisc_param *param)
3074{
d44f9b63 3075 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
3076 struct hclge_desc desc;
3077 int ret;
3078
3079 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3080
d44f9b63 3081 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f
S
3082 req->vf_id = param->vf_id;
3083 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
3084
3085 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3086 if (ret) {
3087 dev_err(&hdev->pdev->dev,
3088 "Set promisc mode fail, status is %d.\n", ret);
3089 return ret;
3090 }
3091 return 0;
3092}
3093
3094void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3095 bool en_mc, bool en_bc, int vport_id)
3096{
3097 if (!param)
3098 return;
3099
3100 memset(param, 0, sizeof(struct hclge_promisc_param));
3101 if (en_uc)
3102 param->enable = HCLGE_PROMISC_EN_UC;
3103 if (en_mc)
3104 param->enable |= HCLGE_PROMISC_EN_MC;
3105 if (en_bc)
3106 param->enable |= HCLGE_PROMISC_EN_BC;
3107 param->vf_id = vport_id;
3108}
3109
3110static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
3111{
3112 struct hclge_vport *vport = hclge_get_vport(handle);
3113 struct hclge_dev *hdev = vport->back;
3114 struct hclge_promisc_param param;
3115
3116 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
3117 hclge_cmd_set_promisc_mode(hdev, &param);
3118}
3119
3120static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3121{
3122 struct hclge_desc desc;
d44f9b63
YL
3123 struct hclge_config_mac_mode_cmd *req =
3124 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 3125 u32 loop_en = 0;
46a3df9f
S
3126 int ret;
3127
3128 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
a90bb9a5
YL
3129 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3130 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3131 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3132 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3133 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3134 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3135 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3136 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3137 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3138 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3139 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3140 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3141 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3142 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3143 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
3144
3145 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3146 if (ret)
3147 dev_err(&hdev->pdev->dev,
3148 "mac enable fail, ret =%d.\n", ret);
3149}
3150
c39c4d98
YL
3151static int hclge_set_loopback(struct hnae3_handle *handle,
3152 enum hnae3_loop loop_mode, bool en)
3153{
3154 struct hclge_vport *vport = hclge_get_vport(handle);
3155 struct hclge_config_mac_mode_cmd *req;
3156 struct hclge_dev *hdev = vport->back;
3157 struct hclge_desc desc;
3158 u32 loop_en;
3159 int ret;
3160
3161 switch (loop_mode) {
3162 case HNAE3_MAC_INTER_LOOP_MAC:
3163 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3164 /* 1 Read out the MAC mode config at first */
3165 hclge_cmd_setup_basic_desc(&desc,
3166 HCLGE_OPC_CONFIG_MAC_MODE,
3167 true);
3168 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3169 if (ret) {
3170 dev_err(&hdev->pdev->dev,
3171 "mac loopback get fail, ret =%d.\n",
3172 ret);
3173 return ret;
3174 }
3175
3176 /* 2 Then setup the loopback flag */
3177 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3178 if (en)
3179 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
3180 else
3181 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3182
3183 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3184
3185 /* 3 Config mac work mode with loopback flag
 3186 * and its original configuration parameters
3187 */
3188 hclge_cmd_reuse_desc(&desc, false);
3189 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3190 if (ret)
3191 dev_err(&hdev->pdev->dev,
3192 "mac loopback set fail, ret =%d.\n", ret);
3193 break;
3194 default:
3195 ret = -ENOTSUPP;
3196 dev_err(&hdev->pdev->dev,
3197 "loop_mode %d is not supported\n", loop_mode);
3198 break;
3199 }
3200
3201 return ret;
3202}
3203
46a3df9f
S
3204static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3205 int stream_id, bool enable)
3206{
3207 struct hclge_desc desc;
d44f9b63
YL
3208 struct hclge_cfg_com_tqp_queue_cmd *req =
3209 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
46a3df9f
S
3210 int ret;
3211
3212 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3213 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3214 req->stream_id = cpu_to_le16(stream_id);
3215 req->enable |= enable << HCLGE_TQP_ENABLE_B;
3216
3217 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3218 if (ret)
3219 dev_err(&hdev->pdev->dev,
3220 "Tqp enable fail, status =%d.\n", ret);
3221 return ret;
3222}
3223
3224static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3225{
3226 struct hclge_vport *vport = hclge_get_vport(handle);
3227 struct hnae3_queue *queue;
3228 struct hclge_tqp *tqp;
3229 int i;
3230
3231 for (i = 0; i < vport->alloc_tqps; i++) {
3232 queue = handle->kinfo.tqp[i];
3233 tqp = container_of(queue, struct hclge_tqp, q);
3234 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3235 }
3236}
3237
3238static int hclge_ae_start(struct hnae3_handle *handle)
3239{
3240 struct hclge_vport *vport = hclge_get_vport(handle);
3241 struct hclge_dev *hdev = vport->back;
3242 int i, queue_id, ret;
3243
3244 for (i = 0; i < vport->alloc_tqps; i++) {
3245 /* todo clear interrupt */
3246 /* ring enable */
3247 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3248 if (queue_id < 0) {
3249 dev_warn(&hdev->pdev->dev,
3250 "Get invalid queue id, ignore it\n");
3251 continue;
3252 }
3253
3254 hclge_tqp_enable(hdev, queue_id, 0, true);
3255 }
3256 /* mac enable */
3257 hclge_cfg_mac_mode(hdev, true);
3258 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
d039ef68 3259 mod_timer(&hdev->service_timer, jiffies + HZ);
46a3df9f
S
3260
3261 ret = hclge_mac_start_phy(hdev);
3262 if (ret)
3263 return ret;
3264
3265 /* reset tqp stats */
3266 hclge_reset_tqp_stats(handle);
3267
3268 return 0;
3269}
3270
3271static void hclge_ae_stop(struct hnae3_handle *handle)
3272{
3273 struct hclge_vport *vport = hclge_get_vport(handle);
3274 struct hclge_dev *hdev = vport->back;
3275 int i, queue_id;
3276
3277 for (i = 0; i < vport->alloc_tqps; i++) {
3278 /* Ring disable */
3279 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3280 if (queue_id < 0) {
3281 dev_warn(&hdev->pdev->dev,
3282 "Get invalid queue id, ignore it\n");
3283 continue;
3284 }
3285
3286 hclge_tqp_enable(hdev, queue_id, 0, false);
3287 }
3288 /* Mac disable */
3289 hclge_cfg_mac_mode(hdev, false);
3290
3291 hclge_mac_stop_phy(hdev);
3292
3293 /* reset tqp stats */
3294 hclge_reset_tqp_stats(handle);
3295}
3296
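/* Translate the firmware response code of a MAC-VLAN table command into an
 * errno: for ADD, 0/1 mean success while 2/3 signal unicast/multicast
 * overflow; for REMOVE and LOOKUP, a nonzero code means the entry was not
 * found.
 */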
3297static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3298 u16 cmdq_resp, u8 resp_code,
3299 enum hclge_mac_vlan_tbl_opcode op)
3300{
3301 struct hclge_dev *hdev = vport->back;
3302 int return_status = -EIO;
3303
3304 if (cmdq_resp) {
3305 dev_err(&hdev->pdev->dev,
3306 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3307 cmdq_resp);
3308 return -EIO;
3309 }
3310
3311 if (op == HCLGE_MAC_VLAN_ADD) {
3312 if ((!resp_code) || (resp_code == 1)) {
3313 return_status = 0;
3314 } else if (resp_code == 2) {
3315 return_status = -EIO;
3316 dev_err(&hdev->pdev->dev,
3317 "add mac addr failed for uc_overflow.\n");
3318 } else if (resp_code == 3) {
3319 return_status = -EIO;
3320 dev_err(&hdev->pdev->dev,
3321 "add mac addr failed for mc_overflow.\n");
3322 } else {
3323 dev_err(&hdev->pdev->dev,
3324 "add mac addr failed for undefined, code=%d.\n",
3325 resp_code);
3326 }
3327 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3328 if (!resp_code) {
3329 return_status = 0;
3330 } else if (resp_code == 1) {
3331 return_status = -EIO;
3332 dev_dbg(&hdev->pdev->dev,
3333 "remove mac addr failed for miss.\n");
3334 } else {
3335 dev_err(&hdev->pdev->dev,
3336 "remove mac addr failed for undefined, code=%d.\n",
3337 resp_code);
3338 }
3339 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3340 if (!resp_code) {
3341 return_status = 0;
3342 } else if (resp_code == 1) {
3343 return_status = -EIO;
3344 dev_dbg(&hdev->pdev->dev,
3345 "lookup mac addr failed for miss.\n");
3346 } else {
3347 dev_err(&hdev->pdev->dev,
3348 "lookup mac addr failed for undefined, code=%d.\n",
3349 resp_code);
3350 }
3351 } else {
3352 return_status = -EIO;
3353 dev_err(&hdev->pdev->dev,
3354 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3355 op);
3356 }
3357
3358 return return_status;
3359}
3360
3361static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3362{
3363 int word_num;
3364 int bit_num;
3365
3366 if (vfid > 255 || vfid < 0)
3367 return -EIO;
3368
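 /* Function IDs 0-191 map into the 32-bit words of desc[1] (6 words * 32
  * bits), while IDs 192-255 continue into desc[2].
  */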
3369 if (vfid >= 0 && vfid <= 191) {
3370 word_num = vfid / 32;
3371 bit_num = vfid % 32;
3372 if (clr)
a90bb9a5 3373 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 3374 else
a90bb9a5 3375 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
3376 } else {
3377 word_num = (vfid - 192) / 32;
3378 bit_num = vfid % 32;
3379 if (clr)
a90bb9a5 3380 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 3381 else
a90bb9a5 3382 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
3383 }
3384
3385 return 0;
3386}
3387
3388static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3389{
3390#define HCLGE_DESC_NUMBER 3
3391#define HCLGE_FUNC_NUMBER_PER_DESC 6
3392 int i, j;
3393
3394 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3395 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3396 if (desc[i].data[j])
3397 return false;
3398
3399 return true;
3400}
3401
d44f9b63 3402static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
46a3df9f
S
3403 const u8 *addr)
3404{
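 /* Bytes 0-3 of the MAC address are packed into the 32-bit high word and
  * bytes 4-5 into the 16-bit low word of the MAC-VLAN table entry.
  */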
3405 const unsigned char *mac_addr = addr;
3406 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3407 (mac_addr[0]) | (mac_addr[1] << 8);
3408 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3409
3410 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3411 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3412}
3413
1db9b1bf
YL
3414static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3415 const u8 *addr)
46a3df9f
S
3416{
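 /* The MTA index is a 12-bit slice of the top 16 bits of the multicast
  * address; how far the slice is shifted depends on the MTA dmac selection
  * mode configured in hclge_mac_init().
  */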
3417 u16 high_val = addr[1] | (addr[0] << 8);
3418 struct hclge_dev *hdev = vport->back;
3419 u32 rsh = 4 - hdev->mta_mac_sel_type;
3420 u16 ret_val = (high_val >> rsh) & 0xfff;
3421
3422 return ret_val;
3423}
3424
3425static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3426 enum hclge_mta_dmac_sel_type mta_mac_sel,
3427 bool enable)
3428{
d44f9b63 3429 struct hclge_mta_filter_mode_cmd *req;
46a3df9f
S
3430 struct hclge_desc desc;
3431 int ret;
3432
d44f9b63 3433 req = (struct hclge_mta_filter_mode_cmd *)desc.data;
46a3df9f
S
3434 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3435
3436 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3437 enable);
3438 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3439 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3440
3441 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3442 if (ret) {
3443 dev_err(&hdev->pdev->dev,
3444 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3445 ret);
3446 return ret;
3447 }
3448
3449 return 0;
3450}
3451
3452int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3453 u8 func_id,
3454 bool enable)
3455{
d44f9b63 3456 struct hclge_cfg_func_mta_filter_cmd *req;
46a3df9f
S
3457 struct hclge_desc desc;
3458 int ret;
3459
d44f9b63 3460 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
46a3df9f
S
3461 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3462
3463 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3464 enable);
3465 req->function_id = func_id;
3466
3467 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3468 if (ret) {
3469 dev_err(&hdev->pdev->dev,
3470 "Config func_id enable failed for cmd_send, ret =%d.\n",
3471 ret);
3472 return ret;
3473 }
3474
3475 return 0;
3476}
3477
3478static int hclge_set_mta_table_item(struct hclge_vport *vport,
3479 u16 idx,
3480 bool enable)
3481{
3482 struct hclge_dev *hdev = vport->back;
d44f9b63 3483 struct hclge_cfg_func_mta_item_cmd *req;
46a3df9f 3484 struct hclge_desc desc;
a90bb9a5 3485 u16 item_idx = 0;
46a3df9f
S
3486 int ret;
3487
d44f9b63 3488 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
46a3df9f
S
3489 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3490 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3491
a90bb9a5 3492 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
46a3df9f 3493 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
a90bb9a5 3494 req->item_idx = cpu_to_le16(item_idx);
46a3df9f
S
3495
3496 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3497 if (ret) {
3498 dev_err(&hdev->pdev->dev,
3499 "Config mta table item failed for cmd_send, ret =%d.\n",
3500 ret);
3501 return ret;
3502 }
3503
3504 return 0;
3505}
3506
3507static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 3508 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
3509{
3510 struct hclge_dev *hdev = vport->back;
3511 struct hclge_desc desc;
3512 u8 resp_code;
a90bb9a5 3513 u16 retval;
46a3df9f
S
3514 int ret;
3515
3516 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3517
d44f9b63 3518 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
3519
3520 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3521 if (ret) {
3522 dev_err(&hdev->pdev->dev,
3523 "del mac addr failed for cmd_send, ret =%d.\n",
3524 ret);
3525 return ret;
3526 }
a90bb9a5
YL
3527 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3528 retval = le16_to_cpu(desc.retval);
46a3df9f 3529
a90bb9a5 3530 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
3531 HCLGE_MAC_VLAN_REMOVE);
3532}
3533
3534static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 3535 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
3536 struct hclge_desc *desc,
3537 bool is_mc)
3538{
3539 struct hclge_dev *hdev = vport->back;
3540 u8 resp_code;
a90bb9a5 3541 u16 retval;
46a3df9f
S
3542 int ret;
3543
3544 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3545 if (is_mc) {
3546 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3547 memcpy(desc[0].data,
3548 req,
d44f9b63 3549 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
3550 hclge_cmd_setup_basic_desc(&desc[1],
3551 HCLGE_OPC_MAC_VLAN_ADD,
3552 true);
3553 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3554 hclge_cmd_setup_basic_desc(&desc[2],
3555 HCLGE_OPC_MAC_VLAN_ADD,
3556 true);
3557 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3558 } else {
3559 memcpy(desc[0].data,
3560 req,
d44f9b63 3561 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
3562 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3563 }
3564 if (ret) {
3565 dev_err(&hdev->pdev->dev,
3566 "lookup mac addr failed for cmd_send, ret =%d.\n",
3567 ret);
3568 return ret;
3569 }
a90bb9a5
YL
3570 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
3571 retval = le16_to_cpu(desc[0].retval);
46a3df9f 3572
a90bb9a5 3573 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
3574 HCLGE_MAC_VLAN_LKUP);
3575}
3576
3577static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 3578 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
3579 struct hclge_desc *mc_desc)
3580{
3581 struct hclge_dev *hdev = vport->back;
3582 int cfg_status;
3583 u8 resp_code;
a90bb9a5 3584 u16 retval;
46a3df9f
S
3585 int ret;
3586
3587 if (!mc_desc) {
3588 struct hclge_desc desc;
3589
3590 hclge_cmd_setup_basic_desc(&desc,
3591 HCLGE_OPC_MAC_VLAN_ADD,
3592 false);
d44f9b63
YL
3593 memcpy(desc.data, req,
3594 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 3595 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
3596 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3597 retval = le16_to_cpu(desc.retval);
3598
3599 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
3600 resp_code,
3601 HCLGE_MAC_VLAN_ADD);
3602 } else {
c3b6f755 3603 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 3604 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 3605 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 3606 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 3607 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
3608 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3609 memcpy(mc_desc[0].data, req,
d44f9b63 3610 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 3611 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
3612 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
3613 retval = le16_to_cpu(mc_desc[0].retval);
3614
3615 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
3616 resp_code,
3617 HCLGE_MAC_VLAN_ADD);
3618 }
3619
3620 if (ret) {
3621 dev_err(&hdev->pdev->dev,
3622 "add mac addr failed for cmd_send, ret =%d.\n",
3623 ret);
3624 return ret;
3625 }
3626
3627 return cfg_status;
3628}
3629
3630static int hclge_add_uc_addr(struct hnae3_handle *handle,
3631 const unsigned char *addr)
3632{
3633 struct hclge_vport *vport = hclge_get_vport(handle);
3634
3635 return hclge_add_uc_addr_common(vport, addr);
3636}
3637
3638int hclge_add_uc_addr_common(struct hclge_vport *vport,
3639 const unsigned char *addr)
3640{
3641 struct hclge_dev *hdev = vport->back;
d44f9b63 3642 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 3643 enum hclge_cmd_status status;
a90bb9a5 3644 u16 egress_port = 0;
46a3df9f
S
3645
3646 /* mac addr check */
3647 if (is_zero_ether_addr(addr) ||
3648 is_broadcast_ether_addr(addr) ||
3649 is_multicast_ether_addr(addr)) {
3650 dev_err(&hdev->pdev->dev,
3651 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3652 addr,
3653 is_zero_ether_addr(addr),
3654 is_broadcast_ether_addr(addr),
3655 is_multicast_ether_addr(addr));
3656 return -EINVAL;
3657 }
3658
3659 memset(&req, 0, sizeof(req));
3660 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3661 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3662 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3663 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
a90bb9a5
YL
3664
3665 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
3666 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
3667 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
46a3df9f 3668 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5 3669 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
46a3df9f 3670 HCLGE_MAC_EPORT_PFID_S, 0);
a90bb9a5
YL
3671
3672 req.egress_port = cpu_to_le16(egress_port);
46a3df9f
S
3673
3674 hclge_prepare_mac_addr(&req, addr);
3675
3676 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
3677
3678 return status;
3679}
3680
3681static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3682 const unsigned char *addr)
3683{
3684 struct hclge_vport *vport = hclge_get_vport(handle);
3685
3686 return hclge_rm_uc_addr_common(vport, addr);
3687}
3688
3689int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3690 const unsigned char *addr)
3691{
3692 struct hclge_dev *hdev = vport->back;
d44f9b63 3693 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
3694 enum hclge_cmd_status status;
3695
3696 /* mac addr check */
3697 if (is_zero_ether_addr(addr) ||
3698 is_broadcast_ether_addr(addr) ||
3699 is_multicast_ether_addr(addr)) {
3700 dev_dbg(&hdev->pdev->dev,
3701 "Remove mac err! invalid mac:%pM.\n",
3702 addr);
3703 return -EINVAL;
3704 }
3705
3706 memset(&req, 0, sizeof(req));
3707 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3708 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3709 hclge_prepare_mac_addr(&req, addr);
3710 status = hclge_remove_mac_vlan_tbl(vport, &req);
3711
3712 return status;
3713}
3714
3715static int hclge_add_mc_addr(struct hnae3_handle *handle,
3716 const unsigned char *addr)
3717{
3718 struct hclge_vport *vport = hclge_get_vport(handle);
3719
3720 return hclge_add_mc_addr_common(vport, addr);
3721}
3722
3723int hclge_add_mc_addr_common(struct hclge_vport *vport,
3724 const unsigned char *addr)
3725{
3726 struct hclge_dev *hdev = vport->back;
d44f9b63 3727 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
3728 struct hclge_desc desc[3];
3729 u16 tbl_idx;
3730 int status;
3731
3732 /* mac addr check */
3733 if (!is_multicast_ether_addr(addr)) {
3734 dev_err(&hdev->pdev->dev,
3735 "Add mc mac err! invalid mac:%pM.\n",
3736 addr);
3737 return -EINVAL;
3738 }
3739 memset(&req, 0, sizeof(req));
3740 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3741 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3742 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3743 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3744 hclge_prepare_mac_addr(&req, addr);
3745 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3746 if (!status) {
3747 /* This mac addr exists, update the VFID for it */
3748 hclge_update_desc_vfid(desc, vport->vport_id, false);
3749 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3750 } else {
3751 /* This mac addr does not exist, add a new entry for it */
3752 memset(desc[0].data, 0, sizeof(desc[0].data));
3753 memset(desc[1].data, 0, sizeof(desc[0].data));
3754 memset(desc[2].data, 0, sizeof(desc[0].data));
3755 hclge_update_desc_vfid(desc, vport->vport_id, false);
3756 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3757 }
3758
3759 /* Set MTA table for this MAC address */
3760 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3761 status = hclge_set_mta_table_item(vport, tbl_idx, true);
3762
3763 return status;
3764}
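/* Multicast add flow in hclge_add_mc_addr_common(): the entry is first looked
 * up across the three descriptors; if it already exists, this vport is merged
 * into the entry's VF bitmap via hclge_update_desc_vfid() and the entry is
 * rewritten, otherwise the descriptor data is zeroed and a fresh entry is
 * created. Finally the address is hashed to an MTA index with
 * hclge_get_mac_addr_to_mta_index() and that item is enabled with
 * hclge_set_mta_table_item().
 */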
3765
3766static int hclge_rm_mc_addr(struct hnae3_handle *handle,
3767 const unsigned char *addr)
3768{
3769 struct hclge_vport *vport = hclge_get_vport(handle);
3770
3771 return hclge_rm_mc_addr_common(vport, addr);
3772}
3773
3774int hclge_rm_mc_addr_common(struct hclge_vport *vport,
3775 const unsigned char *addr)
3776{
3777 struct hclge_dev *hdev = vport->back;
d44f9b63 3778 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
3779 enum hclge_cmd_status status;
3780 struct hclge_desc desc[3];
3781 u16 tbl_idx;
3782
3783 /* mac addr check */
3784 if (!is_multicast_ether_addr(addr)) {
3785 dev_dbg(&hdev->pdev->dev,
3786 "Remove mc mac err! invalid mac:%pM.\n",
3787 addr);
3788 return -EINVAL;
3789 }
3790
3791 memset(&req, 0, sizeof(req));
3792 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3793 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3794 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3795 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3796 hclge_prepare_mac_addr(&req, addr);
3797 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3798 if (!status) {
3799 /* This mac addr exists, remove this handle's VFID for it */
3800 hclge_update_desc_vfid(desc, vport->vport_id, true);
3801
3802 if (hclge_is_all_function_id_zero(desc))
3803 /* All the vfids are zero, so delete this entry */
3804 status = hclge_remove_mac_vlan_tbl(vport, &req);
3805 else
3806 /* Not all the vfids are zero, so update the entry */
3807 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3808
3809 } else {
3810 /* This mac addr does not exist, can't delete it */
3811 dev_err(&hdev->pdev->dev,
d7629e74 3812 "Rm multicast mac addr failed, ret = %d.\n",
46a3df9f
S
3813 status);
3814 return -EIO;
3815 }
3816
3817 /* Clear the MTA table item for this MAC address */
3818 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3819 status = hclge_set_mta_table_item(vport, tbl_idx, false);
3820
3821 return status;
3822}
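/* Multicast removal mirrors the add path: the existing entry is looked up,
 * this vport's bit is cleared from the VF bitmap, and the entry is either
 * deleted outright when hclge_is_all_function_id_zero() reports that no
 * function still references it, or rewritten with the reduced bitmap. The
 * MTA item for the hashed index is then disabled.
 */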
3823
3824static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
3825{
3826 struct hclge_vport *vport = hclge_get_vport(handle);
3827 struct hclge_dev *hdev = vport->back;
3828
3829 ether_addr_copy(p, hdev->hw.mac.mac_addr);
3830}
3831
3832static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
3833{
3834 const unsigned char *new_addr = (const unsigned char *)p;
3835 struct hclge_vport *vport = hclge_get_vport(handle);
3836 struct hclge_dev *hdev = vport->back;
3837
3838 /* mac addr check */
3839 if (is_zero_ether_addr(new_addr) ||
3840 is_broadcast_ether_addr(new_addr) ||
3841 is_multicast_ether_addr(new_addr)) {
3842 dev_err(&hdev->pdev->dev,
3843 "Change uc mac err! invalid mac:%p.\n",
3844 new_addr);
3845 return -EINVAL;
3846 }
3847
3848 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
3849
3850 if (!hclge_add_uc_addr(handle, new_addr)) {
3851 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
3852 return 0;
3853 }
3854
3855 return -EIO;
3856}
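/* hclge_set_mac_addr() swaps the primary address by removing the old unicast
 * entry and adding the new one; the shadow copy in hdev->hw.mac.mac_addr is
 * only updated once the add succeeds, otherwise -EIO is returned.
 */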
3857
3858static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
3859 bool filter_en)
3860{
d44f9b63 3861 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
3862 struct hclge_desc desc;
3863 int ret;
3864
3865 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
3866
d44f9b63 3867 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f
S
3868 req->vlan_type = vlan_type;
3869 req->vlan_fe = filter_en;
3870
3871 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3872 if (ret) {
3873 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
3874 ret);
3875 return ret;
3876 }
3877
3878 return 0;
3879}
3880
3881int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
3882 bool is_kill, u16 vlan, u8 qos, __be16 proto)
3883{
3884#define HCLGE_MAX_VF_BYTES 16
d44f9b63
YL
3885 struct hclge_vlan_filter_vf_cfg_cmd *req0;
3886 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
3887 struct hclge_desc desc[2];
3888 u8 vf_byte_val;
3889 u8 vf_byte_off;
3890 int ret;
3891
3892 hclge_cmd_setup_basic_desc(&desc[0],
3893 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3894 hclge_cmd_setup_basic_desc(&desc[1],
3895 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3896
3897 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3898
3899 vf_byte_off = vfid / 8;
3900 vf_byte_val = 1 << (vfid % 8);
3901
d44f9b63
YL
3902 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
3903 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 3904
a90bb9a5 3905 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
3906 req0->vlan_cfg = is_kill;
3907
3908 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
3909 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
3910 else
3911 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
3912
3913 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3914 if (ret) {
3915 dev_err(&hdev->pdev->dev,
3916 "Send vf vlan command fail, ret =%d.\n",
3917 ret);
3918 return ret;
3919 }
3920
3921 if (!is_kill) {
3922 if (!req0->resp_code || req0->resp_code == 1)
3923 return 0;
3924
3925 dev_err(&hdev->pdev->dev,
3926 "Add vf vlan filter fail, ret =%d.\n",
3927 req0->resp_code);
3928 } else {
3929 if (!req0->resp_code)
3930 return 0;
3931
3932 dev_err(&hdev->pdev->dev,
3933 "Kill vf vlan filter fail, ret =%d.\n",
3934 req0->resp_code);
3935 }
3936
3937 return -EIO;
3938}
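/* The per-VF VLAN bitmap spans two descriptors of HCLGE_MAX_VF_BYTES (16)
 * bytes each: vfid / 8 selects the byte, vfid % 8 the bit, and byte indices
 * past the first 16 land in the second descriptor. Illustrative example (not
 * from the original source): vfid 130 gives byte offset 16, so the bit value
 * 1 << 2 = 0x04 is written to req1->vf_bitmap[0].
 */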
3939
3940static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
3941 __be16 proto, u16 vlan_id,
3942 bool is_kill)
3943{
3944 struct hclge_vport *vport = hclge_get_vport(handle);
3945 struct hclge_dev *hdev = vport->back;
d44f9b63 3946 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
3947 struct hclge_desc desc;
3948 u8 vlan_offset_byte_val;
3949 u8 vlan_offset_byte;
3950 u8 vlan_offset_160;
3951 int ret;
3952
3953 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
3954
3955 vlan_offset_160 = vlan_id / 160;
3956 vlan_offset_byte = (vlan_id % 160) / 8;
3957 vlan_offset_byte_val = 1 << (vlan_id % 8);
3958
d44f9b63 3959 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
3960 req->vlan_offset = vlan_offset_160;
3961 req->vlan_cfg = is_kill;
3962 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
3963
3964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3965 if (ret) {
3966 dev_err(&hdev->pdev->dev,
3967 "port vlan command, send fail, ret =%d.\n",
3968 ret);
3969 return ret;
3970 }
3971
3972 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
3973 if (ret) {
3974 dev_err(&hdev->pdev->dev,
3975 "Set pf vlan filter config fail, ret =%d.\n",
3976 ret);
3977 return -EIO;
3978 }
3979
3980 return 0;
3981}
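/* Port VLAN filtering groups VLAN IDs in blocks of 160: vlan_id / 160 selects
 * vlan_offset, (vlan_id % 160) / 8 the byte inside the block, and vlan_id % 8
 * the bit. Illustrative example (not from the original source): VLAN 1234
 * maps to offset 7, byte 14, bit value 1 << 2. The same VLAN is then also
 * written to function 0 of the VF VLAN table via
 * hclge_set_vf_vlan_common(hdev, 0, ...).
 */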
3982
3983static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
3984 u16 vlan, u8 qos, __be16 proto)
3985{
3986 struct hclge_vport *vport = hclge_get_vport(handle);
3987 struct hclge_dev *hdev = vport->back;
3988
3989 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
3990 return -EINVAL;
3991 if (proto != htons(ETH_P_8021Q))
3992 return -EPROTONOSUPPORT;
3993
3994 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
3995}
3996
3997static int hclge_init_vlan_config(struct hclge_dev *hdev)
3998{
3999#define HCLGE_VLAN_TYPE_VF_TABLE 0
4000#define HCLGE_VLAN_TYPE_PORT_TABLE 1
5e43aef8 4001 struct hnae3_handle *handle;
46a3df9f
S
4002 int ret;
4003
4004 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
4005 true);
4006 if (ret)
4007 return ret;
4008
4009 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
4010 true);
5e43aef8
L
4011 if (ret)
4012 return ret;
46a3df9f 4013
5e43aef8
L
4014 handle = &hdev->vport[0].nic;
4015 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
4016}
4017
4018static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4019{
4020 struct hclge_vport *vport = hclge_get_vport(handle);
d44f9b63 4021 struct hclge_config_max_frm_size_cmd *req;
46a3df9f
S
4022 struct hclge_dev *hdev = vport->back;
4023 struct hclge_desc desc;
4024 int ret;
4025
4026 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
4027 return -EINVAL;
4028
4029 hdev->mps = new_mtu;
4030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4031
d44f9b63 4032 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
46a3df9f
S
4033 req->max_frm_size = cpu_to_le16(new_mtu);
4034
4035 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4036 if (ret) {
4037 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
4038 return ret;
4039 }
4040
4041 return 0;
4042}
4043
4044static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
4045 bool enable)
4046{
d44f9b63 4047 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
4048 struct hclge_desc desc;
4049 int ret;
4050
4051 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
4052
d44f9b63 4053 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
4054 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4055 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
4056
4057 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4058 if (ret) {
4059 dev_err(&hdev->pdev->dev,
4060 "Send tqp reset cmd error, status =%d\n", ret);
4061 return ret;
4062 }
4063
4064 return 0;
4065}
4066
4067static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
4068{
d44f9b63 4069 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
4070 struct hclge_desc desc;
4071 int ret;
4072
4073 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
4074
d44f9b63 4075 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
4076 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4077
4078 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4079 if (ret) {
4080 dev_err(&hdev->pdev->dev,
4081 "Get reset status error, status =%d\n", ret);
4082 return ret;
4083 }
4084
4085 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
4086}
4087
4088static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
4089{
4090 struct hclge_vport *vport = hclge_get_vport(handle);
4091 struct hclge_dev *hdev = vport->back;
4092 int reset_try_times = 0;
4093 int reset_status;
4094 int ret;
4095
4096 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
4097 if (ret) {
4098 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
4099 return;
4100 }
4101
4102 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
4103 if (ret) {
4104 dev_warn(&hdev->pdev->dev,
4105 "Send reset tqp cmd fail, ret = %d\n", ret);
4106 return;
4107 }
4108
4109 reset_try_times = 0;
4110 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
4111 /* Wait for tqp hw reset */
4112 msleep(20);
4113 reset_status = hclge_get_reset_status(hdev, queue_id);
4114 if (reset_status)
4115 break;
4116 }
4117
4118 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
4119 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
4120 return;
4121 }
4122
4123 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
4124 if (ret) {
4125 dev_warn(&hdev->pdev->dev,
4126 "Deassert the soft reset fail, ret = %d\n", ret);
4127 return;
4128 }
4129}
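/* TQP reset handshake in hclge_reset_tqp(): the queue is first disabled with
 * hclge_tqp_enable(), the reset request bit is set through
 * hclge_send_reset_tqp_cmd(), then the ready_to_reset status is polled every
 * 20 ms for up to HCLGE_TQP_RESET_TRY_TIMES attempts before the request bit
 * is cleared again. Failures are only logged with dev_warn() since the
 * function returns void.
 */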
4130
4131static u32 hclge_get_fw_version(struct hnae3_handle *handle)
4132{
4133 struct hclge_vport *vport = hclge_get_vport(handle);
4134 struct hclge_dev *hdev = vport->back;
4135
4136 return hdev->fw_version;
4137}
4138
4139static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
4140 u32 *rx_en, u32 *tx_en)
4141{
4142 struct hclge_vport *vport = hclge_get_vport(handle);
4143 struct hclge_dev *hdev = vport->back;
4144
4145 *auto_neg = hclge_get_autoneg(handle);
4146
4147 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4148 *rx_en = 0;
4149 *tx_en = 0;
4150 return;
4151 }
4152
4153 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
4154 *rx_en = 1;
4155 *tx_en = 0;
4156 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
4157 *tx_en = 1;
4158 *rx_en = 0;
4159 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
4160 *rx_en = 1;
4161 *tx_en = 1;
4162 } else {
4163 *rx_en = 0;
4164 *tx_en = 0;
4165 }
4166}
4167
4168static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
4169 u8 *auto_neg, u32 *speed, u8 *duplex)
4170{
4171 struct hclge_vport *vport = hclge_get_vport(handle);
4172 struct hclge_dev *hdev = vport->back;
4173
4174 if (speed)
4175 *speed = hdev->hw.mac.speed;
4176 if (duplex)
4177 *duplex = hdev->hw.mac.duplex;
4178 if (auto_neg)
4179 *auto_neg = hdev->hw.mac.autoneg;
4180}
4181
4182static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
4183{
4184 struct hclge_vport *vport = hclge_get_vport(handle);
4185 struct hclge_dev *hdev = vport->back;
4186
4187 if (media_type)
4188 *media_type = hdev->hw.mac.media_type;
4189}
4190
4191static void hclge_get_mdix_mode(struct hnae3_handle *handle,
4192 u8 *tp_mdix_ctrl, u8 *tp_mdix)
4193{
4194 struct hclge_vport *vport = hclge_get_vport(handle);
4195 struct hclge_dev *hdev = vport->back;
4196 struct phy_device *phydev = hdev->hw.mac.phydev;
4197 int mdix_ctrl, mdix, retval, is_resolved;
4198
4199 if (!phydev) {
4200 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4201 *tp_mdix = ETH_TP_MDI_INVALID;
4202 return;
4203 }
4204
4205 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
4206
4207 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
4208 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
4209 HCLGE_PHY_MDIX_CTRL_S);
4210
4211 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
4212 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
4213 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
4214
4215 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
4216
4217 switch (mdix_ctrl) {
4218 case 0x0:
4219 *tp_mdix_ctrl = ETH_TP_MDI;
4220 break;
4221 case 0x1:
4222 *tp_mdix_ctrl = ETH_TP_MDI_X;
4223 break;
4224 case 0x3:
4225 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
4226 break;
4227 default:
4228 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4229 break;
4230 }
4231
4232 if (!is_resolved)
4233 *tp_mdix = ETH_TP_MDI_INVALID;
4234 else if (mdix)
4235 *tp_mdix = ETH_TP_MDI_X;
4236 else
4237 *tp_mdix = ETH_TP_MDI;
4238}
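/* MDI-X reporting: the PHY is temporarily switched to the MDIX register page,
 * the control register gives the configured mode (MDI / MDI-X / auto) and the
 * status register gives the resolved crossover state, then the PHY is
 * switched back to the copper page. If speed/duplex is not yet resolved the
 * state is reported as ETH_TP_MDI_INVALID.
 */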
4239
4240static int hclge_init_client_instance(struct hnae3_client *client,
4241 struct hnae3_ae_dev *ae_dev)
4242{
4243 struct hclge_dev *hdev = ae_dev->priv;
4244 struct hclge_vport *vport;
4245 int i, ret;
4246
4247 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4248 vport = &hdev->vport[i];
4249
4250 switch (client->type) {
4251 case HNAE3_CLIENT_KNIC:
4252
4253 hdev->nic_client = client;
4254 vport->nic.client = client;
4255 ret = client->ops->init_instance(&vport->nic);
4256 if (ret)
4257 goto err;
4258
4259 if (hdev->roce_client &&
e92a0843 4260 hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
4261 struct hnae3_client *rc = hdev->roce_client;
4262
4263 ret = hclge_init_roce_base_info(vport);
4264 if (ret)
4265 goto err;
4266
4267 ret = rc->ops->init_instance(&vport->roce);
4268 if (ret)
4269 goto err;
4270 }
4271
4272 break;
4273 case HNAE3_CLIENT_UNIC:
4274 hdev->nic_client = client;
4275 vport->nic.client = client;
4276
4277 ret = client->ops->init_instance(&vport->nic);
4278 if (ret)
4279 goto err;
4280
4281 break;
4282 case HNAE3_CLIENT_ROCE:
e92a0843 4283 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
4284 hdev->roce_client = client;
4285 vport->roce.client = client;
4286 }
4287
3a46f34d 4288 if (hdev->roce_client && hdev->nic_client) {
46a3df9f
S
4289 ret = hclge_init_roce_base_info(vport);
4290 if (ret)
4291 goto err;
4292
4293 ret = client->ops->init_instance(&vport->roce);
4294 if (ret)
4295 goto err;
4296 }
4297 }
4298 }
4299
4300 return 0;
4301err:
4302 return ret;
4303}
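/* Client bring-up order: the NIC (KNIC/UNIC) instance is initialized for each
 * vport first; the RoCE instance is only initialized once both the NIC and
 * RoCE clients have registered and hnae3_dev_roce_supported() is true, so the
 * two clients may register in either order.
 */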
4304
4305static void hclge_uninit_client_instance(struct hnae3_client *client,
4306 struct hnae3_ae_dev *ae_dev)
4307{
4308 struct hclge_dev *hdev = ae_dev->priv;
4309 struct hclge_vport *vport;
4310 int i;
4311
4312 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4313 vport = &hdev->vport[i];
a17dcf3f 4314 if (hdev->roce_client) {
46a3df9f
S
4315 hdev->roce_client->ops->uninit_instance(&vport->roce,
4316 0);
a17dcf3f
L
4317 hdev->roce_client = NULL;
4318 vport->roce.client = NULL;
4319 }
46a3df9f
S
4320 if (client->type == HNAE3_CLIENT_ROCE)
4321 return;
a17dcf3f 4322 if (client->ops->uninit_instance) {
46a3df9f 4323 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
4324 hdev->nic_client = NULL;
4325 vport->nic.client = NULL;
4326 }
46a3df9f
S
4327 }
4328}
4329
4330static int hclge_pci_init(struct hclge_dev *hdev)
4331{
4332 struct pci_dev *pdev = hdev->pdev;
4333 struct hclge_hw *hw;
4334 int ret;
4335
4336 ret = pci_enable_device(pdev);
4337 if (ret) {
4338 dev_err(&pdev->dev, "failed to enable PCI device\n");
4339 goto err_no_drvdata;
4340 }
4341
4342 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4343 if (ret) {
4344 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4345 if (ret) {
4346 dev_err(&pdev->dev,
4347 "can't set consistent PCI DMA");
4348 goto err_disable_device;
4349 }
4350 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4351 }
4352
4353 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4354 if (ret) {
4355 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4356 goto err_disable_device;
4357 }
4358
4359 pci_set_master(pdev);
4360 hw = &hdev->hw;
4361 hw->back = hdev;
4362 hw->io_base = pcim_iomap(pdev, 2, 0);
4363 if (!hw->io_base) {
4364 dev_err(&pdev->dev, "Can't map configuration register space\n");
4365 ret = -ENOMEM;
4366 goto err_clr_master;
4367 }
4368
709eb41a
L
4369 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
4370
46a3df9f
S
4371 return 0;
4372err_clr_master:
4373 pci_clear_master(pdev);
4374 pci_release_regions(pdev);
4375err_disable_device:
4376 pci_disable_device(pdev);
4377err_no_drvdata:
4378 pci_set_drvdata(pdev, NULL);
4379
4380 return ret;
4381}
4382
4383static void hclge_pci_uninit(struct hclge_dev *hdev)
4384{
4385 struct pci_dev *pdev = hdev->pdev;
4386
4387 if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
4388 pci_disable_msix(pdev);
4389 devm_kfree(&pdev->dev, hdev->msix_entries);
4390 hdev->msix_entries = NULL;
4391 } else {
4392 pci_disable_msi(pdev);
4393 }
4394
4395 pci_clear_master(pdev);
4396 pci_release_mem_regions(pdev);
4397 pci_disable_device(pdev);
4398}
4399
4400static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4401{
4402 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
4403 struct hclge_dev *hdev;
4404 int ret;
4405
4406 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4407 if (!hdev) {
4408 ret = -ENOMEM;
4409 goto err_hclge_dev;
4410 }
4411
4412 hdev->flag |= HCLGE_FLAG_USE_MSIX;
4413 hdev->pdev = pdev;
4414 hdev->ae_dev = ae_dev;
4415 ae_dev->priv = hdev;
4416
46a3df9f
S
4417 ret = hclge_pci_init(hdev);
4418 if (ret) {
4419 dev_err(&pdev->dev, "PCI init failed\n");
4420 goto err_pci_init;
4421 }
4422
4423 /* Command queue initialize */
4424 ret = hclge_cmd_init(hdev);
4425 if (ret)
4426 goto err_cmd_init;
4427
4428 ret = hclge_get_cap(hdev);
4429 if (ret) {
e00e2197
CIK
4430 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4431 ret);
46a3df9f
S
4432 return ret;
4433 }
4434
4435 ret = hclge_configure(hdev);
4436 if (ret) {
4437 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4438 return ret;
4439 }
4440
4441 if (hdev->flag & HCLGE_FLAG_USE_MSIX)
4442 ret = hclge_init_msix(hdev);
4443 else
4444 ret = hclge_init_msi(hdev);
4445 if (ret) {
4446 dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
4447 return ret;
4448 }
4449
4450 ret = hclge_alloc_tqps(hdev);
4451 if (ret) {
4452 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4453 return ret;
4454 }
4455
4456 ret = hclge_alloc_vport(hdev);
4457 if (ret) {
4458 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4459 return ret;
4460 }
4461
4462 ret = hclge_mac_init(hdev);
4463 if (ret) {
4464 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4465 return ret;
4466 }
4467 ret = hclge_buffer_alloc(hdev);
4468 if (ret) {
4469 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4470 return ret;
4471 }
4472
4473 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4474 if (ret) {
4475 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4476 return ret;
4477 }
4478
46a3df9f
S
4479 ret = hclge_init_vlan_config(hdev);
4480 if (ret) {
4481 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4482 return ret;
4483 }
4484
4485 ret = hclge_tm_schd_init(hdev);
4486 if (ret) {
4487 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4488 return ret;
68ece54e
YL
4489 }
4490
4491 ret = hclge_rss_init_hw(hdev);
4492 if (ret) {
4493 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4494 return ret;
46a3df9f
S
4495 }
4496
cacde272
YL
4497 hclge_dcb_ops_set(hdev);
4498
d039ef68 4499 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
46a3df9f
S
4500 INIT_WORK(&hdev->service_task, hclge_service_task);
4501
4502 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4503 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4504
4505 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4506 return 0;
4507
4508err_cmd_init:
4509 pci_release_regions(pdev);
4510err_pci_init:
4511 pci_set_drvdata(pdev, NULL);
4512err_hclge_dev:
4513 return ret;
4514}
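/* Probe order in hclge_init_ae_dev(): PCI setup, command queue, capability
 * query and configuration, MSI-X/MSI vectors, TQP and vport allocation, MAC
 * and buffer init, TSO config, VLAN init, TM scheduler, RSS, DCB ops, and
 * finally the service timer and work item.
 */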
4515
4516static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4517{
4518 struct hclge_dev *hdev = ae_dev->priv;
4519 struct hclge_mac *mac = &hdev->hw.mac;
4520
4521 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4522
2a32ca13
AB
4523 if (IS_ENABLED(CONFIG_PCI_IOV))
4524 hclge_disable_sriov(hdev);
46a3df9f 4525
d039ef68 4526 if (hdev->service_timer.function)
46a3df9f
S
4527 del_timer_sync(&hdev->service_timer);
4528 if (hdev->service_task.func)
4529 cancel_work_sync(&hdev->service_task);
4530
4531 if (mac->phydev)
4532 mdiobus_unregister(mac->mdio_bus);
4533
4534 hclge_destroy_cmd_queue(&hdev->hw);
4535 hclge_pci_uninit(hdev);
4536 ae_dev->priv = NULL;
4537}
4538
4539static const struct hnae3_ae_ops hclge_ops = {
4540 .init_ae_dev = hclge_init_ae_dev,
4541 .uninit_ae_dev = hclge_uninit_ae_dev,
4542 .init_client_instance = hclge_init_client_instance,
4543 .uninit_client_instance = hclge_uninit_client_instance,
4544 .map_ring_to_vector = hclge_map_handle_ring_to_vector,
4545 .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4546 .get_vector = hclge_get_vector,
4547 .set_promisc_mode = hclge_set_promisc_mode,
c39c4d98 4548 .set_loopback = hclge_set_loopback,
46a3df9f
S
4549 .start = hclge_ae_start,
4550 .stop = hclge_ae_stop,
4551 .get_status = hclge_get_status,
4552 .get_ksettings_an_result = hclge_get_ksettings_an_result,
4553 .update_speed_duplex_h = hclge_update_speed_duplex_h,
4554 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4555 .get_media_type = hclge_get_media_type,
4556 .get_rss_key_size = hclge_get_rss_key_size,
4557 .get_rss_indir_size = hclge_get_rss_indir_size,
4558 .get_rss = hclge_get_rss,
4559 .set_rss = hclge_set_rss,
f7db940a 4560 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 4561 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
4562 .get_tc_size = hclge_get_tc_size,
4563 .get_mac_addr = hclge_get_mac_addr,
4564 .set_mac_addr = hclge_set_mac_addr,
4565 .add_uc_addr = hclge_add_uc_addr,
4566 .rm_uc_addr = hclge_rm_uc_addr,
4567 .add_mc_addr = hclge_add_mc_addr,
4568 .rm_mc_addr = hclge_rm_mc_addr,
4569 .set_autoneg = hclge_set_autoneg,
4570 .get_autoneg = hclge_get_autoneg,
4571 .get_pauseparam = hclge_get_pauseparam,
4572 .set_mtu = hclge_set_mtu,
4573 .reset_queue = hclge_reset_tqp,
4574 .get_stats = hclge_get_stats,
4575 .update_stats = hclge_update_stats,
4576 .get_strings = hclge_get_strings,
4577 .get_sset_count = hclge_get_sset_count,
4578 .get_fw_version = hclge_get_fw_version,
4579 .get_mdix_mode = hclge_get_mdix_mode,
4580 .set_vlan_filter = hclge_set_port_vlan_filter,
4581 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4582};
4583
4584static struct hnae3_ae_algo ae_algo = {
4585 .ops = &hclge_ops,
4586 .name = HCLGE_NAME,
4587 .pdev_id_table = ae_algo_pci_tbl,
4588};
4589
4590static int hclge_init(void)
4591{
4592 pr_info("%s is initializing\n", HCLGE_NAME);
4593
4594 return hnae3_register_ae_algo(&ae_algo);
4595}
4596
4597static void hclge_exit(void)
4598{
4599 hnae3_unregister_ae_algo(&ae_algo);
4600}
4601module_init(hclge_init);
4602module_exit(hclge_exit);
4603
4604MODULE_LICENSE("GPL");
4605MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4606MODULE_DESCRIPTION("HCLGE Driver");
4607MODULE_VERSION(HCLGE_MOD_VERSION);