drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/acpi.h>
11#include <linux/device.h>
12#include <linux/etherdevice.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/pci.h>
19#include <linux/platform_device.h>
d5752031 20#include <net/rtnetlink.h>
46a3df9f 21#include "hclge_cmd.h"
cacde272 22#include "hclge_dcb.h"
46a3df9f 23#include "hclge_main.h"
0cdbdd3e 24#include "hclge_mbx.h"
25#include "hclge_mdio.h"
26#include "hclge_tm.h"
27#include "hnae3.h"
28
29#define HCLGE_NAME "hclge"
30#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
31#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
32#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
33#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
34
35static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
36 enum hclge_mta_dmac_sel_type mta_mac_sel,
37 bool enable);
38static int hclge_init_vlan_config(struct hclge_dev *hdev);
4ed340ab 39static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
40
41static struct hnae3_ae_algo ae_algo;
42
43static const struct pci_device_id ae_algo_pci_tbl[] = {
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
e92a0843 51 /* required last entry */
52 {0, }
53};
54
55static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
56 "Mac Loopback test",
57 "Serdes Loopback test",
58 "Phy Loopback test"
59};
60
61static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
62 {"igu_rx_oversize_pkt",
63 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
64 {"igu_rx_undersize_pkt",
65 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
66 {"igu_rx_out_all_pkt",
67 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
68 {"igu_rx_uni_pkt",
69 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
70 {"igu_rx_multi_pkt",
71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
72 {"igu_rx_broad_pkt",
73 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
74 {"egu_tx_out_all_pkt",
75 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
76 {"egu_tx_uni_pkt",
77 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
78 {"egu_tx_multi_pkt",
79 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
80 {"egu_tx_broad_pkt",
81 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
82 {"ssu_ppp_mac_key_num",
83 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
84 {"ssu_ppp_host_key_num",
85 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
86 {"ppp_ssu_mac_rlt_num",
87 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
88 {"ppp_ssu_host_rlt_num",
89 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
90 {"ssu_tx_in_num",
91 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
92 {"ssu_tx_out_num",
93 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
94 {"ssu_rx_in_num",
95 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
96 {"ssu_rx_out_num",
97 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
98};
99
100static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
101 {"igu_rx_err_pkt",
102 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
103 {"igu_rx_no_eof_pkt",
104 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
105 {"igu_rx_no_sof_pkt",
106 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
107 {"egu_tx_1588_pkt",
108 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
109 {"ssu_full_drop_num",
110 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
111 {"ssu_part_drop_num",
112 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
113 {"ppp_key_drop_num",
114 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
115 {"ppp_rlt_drop_num",
116 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
117 {"ssu_key_drop_num",
118 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
119 {"pkt_curr_buf_cnt",
120 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
121 {"qcn_fb_rcv_cnt",
122 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
123 {"qcn_fb_drop_cnt",
124 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
125 {"qcn_fb_invaild_cnt",
126 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
127 {"rx_packet_tc0_in_cnt",
128 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
129 {"rx_packet_tc1_in_cnt",
130 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
131 {"rx_packet_tc2_in_cnt",
132 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
133 {"rx_packet_tc3_in_cnt",
134 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
135 {"rx_packet_tc4_in_cnt",
136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
137 {"rx_packet_tc5_in_cnt",
138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
139 {"rx_packet_tc6_in_cnt",
140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
141 {"rx_packet_tc7_in_cnt",
142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
143 {"rx_packet_tc0_out_cnt",
144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
145 {"rx_packet_tc1_out_cnt",
146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
147 {"rx_packet_tc2_out_cnt",
148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
149 {"rx_packet_tc3_out_cnt",
150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
151 {"rx_packet_tc4_out_cnt",
152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
153 {"rx_packet_tc5_out_cnt",
154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
155 {"rx_packet_tc6_out_cnt",
156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
157 {"rx_packet_tc7_out_cnt",
158 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
159 {"tx_packet_tc0_in_cnt",
160 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
161 {"tx_packet_tc1_in_cnt",
162 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
163 {"tx_packet_tc2_in_cnt",
164 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
165 {"tx_packet_tc3_in_cnt",
166 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
167 {"tx_packet_tc4_in_cnt",
168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
169 {"tx_packet_tc5_in_cnt",
170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
171 {"tx_packet_tc6_in_cnt",
172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
173 {"tx_packet_tc7_in_cnt",
174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
175 {"tx_packet_tc0_out_cnt",
176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
177 {"tx_packet_tc1_out_cnt",
178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
179 {"tx_packet_tc2_out_cnt",
180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
181 {"tx_packet_tc3_out_cnt",
182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
183 {"tx_packet_tc4_out_cnt",
184 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
185 {"tx_packet_tc5_out_cnt",
186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
187 {"tx_packet_tc6_out_cnt",
188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
189 {"tx_packet_tc7_out_cnt",
190 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
191 {"pkt_curr_buf_tc0_cnt",
192 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
193 {"pkt_curr_buf_tc1_cnt",
194 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
195 {"pkt_curr_buf_tc2_cnt",
196 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
197 {"pkt_curr_buf_tc3_cnt",
198 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
199 {"pkt_curr_buf_tc4_cnt",
200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
201 {"pkt_curr_buf_tc5_cnt",
202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
203 {"pkt_curr_buf_tc6_cnt",
204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
205 {"pkt_curr_buf_tc7_cnt",
206 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
207 {"mb_uncopy_num",
208 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
209 {"lo_pri_unicast_rlt_drop_num",
210 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
211 {"hi_pri_multicast_rlt_drop_num",
212 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
213 {"lo_pri_multicast_rlt_drop_num",
214 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
215 {"rx_oq_drop_pkt_cnt",
216 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
217 {"tx_oq_drop_pkt_cnt",
218 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
219 {"nic_l2_err_drop_pkt_cnt",
220 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
221 {"roc_l2_err_drop_pkt_cnt",
222 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
223};
224
225static const struct hclge_comm_stats_str g_mac_stats_string[] = {
226 {"mac_tx_mac_pause_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
228 {"mac_rx_mac_pause_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
230 {"mac_tx_pfc_pri0_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
232 {"mac_tx_pfc_pri1_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
234 {"mac_tx_pfc_pri2_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
236 {"mac_tx_pfc_pri3_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
238 {"mac_tx_pfc_pri4_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
240 {"mac_tx_pfc_pri5_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
242 {"mac_tx_pfc_pri6_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
244 {"mac_tx_pfc_pri7_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
246 {"mac_rx_pfc_pri0_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
248 {"mac_rx_pfc_pri1_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
250 {"mac_rx_pfc_pri2_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
252 {"mac_rx_pfc_pri3_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
254 {"mac_rx_pfc_pri4_pkt_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
256 {"mac_rx_pfc_pri5_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
258 {"mac_rx_pfc_pri6_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
260 {"mac_rx_pfc_pri7_pkt_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
262 {"mac_tx_total_pkt_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
264 {"mac_tx_total_oct_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
266 {"mac_tx_good_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
268 {"mac_tx_bad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
270 {"mac_tx_good_oct_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
272 {"mac_tx_bad_oct_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
274 {"mac_tx_uni_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
276 {"mac_tx_multi_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
278 {"mac_tx_broad_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
280 {"mac_tx_undersize_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
282 {"mac_tx_overrsize_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
284 {"mac_tx_64_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
286 {"mac_tx_65_127_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
288 {"mac_tx_128_255_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
290 {"mac_tx_256_511_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
292 {"mac_tx_512_1023_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
294 {"mac_tx_1024_1518_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
296 {"mac_tx_1519_max_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
298 {"mac_rx_total_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
300 {"mac_rx_total_oct_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
302 {"mac_rx_good_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
304 {"mac_rx_bad_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
306 {"mac_rx_good_oct_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
308 {"mac_rx_bad_oct_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
310 {"mac_rx_uni_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
312 {"mac_rx_multi_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
314 {"mac_rx_broad_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
316 {"mac_rx_undersize_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
318 {"mac_rx_overrsize_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
320 {"mac_rx_64_oct_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
322 {"mac_rx_65_127_oct_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
324 {"mac_rx_128_255_oct_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
326 {"mac_rx_256_511_oct_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
328 {"mac_rx_512_1023_oct_pkt_num",
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
330 {"mac_rx_1024_1518_oct_pkt_num",
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
332 {"mac_rx_1519_max_oct_pkt_num",
333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
334
335 {"mac_trans_fragment_pkt_num",
336 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
337 {"mac_trans_undermin_pkt_num",
338 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
339 {"mac_trans_jabber_pkt_num",
340 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
341 {"mac_trans_err_all_pkt_num",
342 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
343 {"mac_trans_from_app_good_pkt_num",
344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
345 {"mac_trans_from_app_bad_pkt_num",
346 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
347 {"mac_rcv_fragment_pkt_num",
348 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
349 {"mac_rcv_undermin_pkt_num",
350 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
351 {"mac_rcv_jabber_pkt_num",
352 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
353 {"mac_rcv_fcs_err_pkt_num",
354 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
355 {"mac_rcv_send_app_good_pkt_num",
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
357 {"mac_rcv_send_app_bad_pkt_num",
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
359};
360
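/* hclge_64_bit_update_stats: read the 64 bit packet counters with the
 * HCLGE_OPC_STATS_64_BIT command (a chain of 5 descriptors) and
 * accumulate them into hdev->hw_stats.all_64_bit_stats. The first
 * descriptor returns one 64 bit data word less than the others.
 */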
361static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
362{
363#define HCLGE_64_BIT_CMD_NUM 5
364#define HCLGE_64_BIT_RTN_DATANUM 4
365 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
366 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
a90bb9a5 367 __le64 *desc_data;
368 int i, k, n;
369 int ret;
370
371 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
372 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
373 if (ret) {
374 dev_err(&hdev->pdev->dev,
375 "Get 64 bit pkt stats fail, status = %d.\n", ret);
376 return ret;
377 }
378
379 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
380 if (unlikely(i == 0)) {
a90bb9a5 381 desc_data = (__le64 *)(&desc[i].data[0]);
382 n = HCLGE_64_BIT_RTN_DATANUM - 1;
383 } else {
a90bb9a5 384 desc_data = (__le64 *)(&desc[i]);
385 n = HCLGE_64_BIT_RTN_DATANUM;
386 }
387 for (k = 0; k < n; k++) {
a90bb9a5 388 *data++ += le64_to_cpu(*desc_data);
389 desc_data++;
390 }
391 }
392
393 return 0;
394}
395
396static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
397{
398 stats->pkt_curr_buf_cnt = 0;
399 stats->pkt_curr_buf_tc0_cnt = 0;
400 stats->pkt_curr_buf_tc1_cnt = 0;
401 stats->pkt_curr_buf_tc2_cnt = 0;
402 stats->pkt_curr_buf_tc3_cnt = 0;
403 stats->pkt_curr_buf_tc4_cnt = 0;
404 stats->pkt_curr_buf_tc5_cnt = 0;
405 stats->pkt_curr_buf_tc6_cnt = 0;
406 stats->pkt_curr_buf_tc7_cnt = 0;
407}
408
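/* hclge_32_bit_update_stats: read the 32 bit packet counters with the
 * HCLGE_OPC_STATS_32_BIT command (8 descriptors) and accumulate them
 * into hdev->hw_stats.all_32_bit_stats. The "current buffer" counters
 * are snapshots rather than running sums, so they are cleared first by
 * hclge_reset_partial_32bit_counter(); the first descriptor mixes
 * 32 bit and 16 bit fields and is parsed separately.
 */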
409static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
410{
411#define HCLGE_32_BIT_CMD_NUM 8
412#define HCLGE_32_BIT_RTN_DATANUM 8
413
414 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
415 struct hclge_32_bit_stats *all_32_bit_stats;
a90bb9a5 416 __le32 *desc_data;
417 int i, k, n;
418 u64 *data;
419 int ret;
420
421 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
422 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
423
424 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
425 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
426 if (ret) {
427 dev_err(&hdev->pdev->dev,
428 "Get 32 bit pkt stats fail, status = %d.\n", ret);
429
430 return ret;
431 }
432
433 hclge_reset_partial_32bit_counter(all_32_bit_stats);
434 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
435 if (unlikely(i == 0)) {
436 __le16 *desc_data_16bit;
437
46a3df9f 438 all_32_bit_stats->igu_rx_err_pkt +=
439 le32_to_cpu(desc[i].data[0]);
440
441 desc_data_16bit = (__le16 *)&desc[i].data[1];
46a3df9f 442 all_32_bit_stats->igu_rx_no_eof_pkt +=
443 le16_to_cpu(*desc_data_16bit);
444
445 desc_data_16bit++;
46a3df9f 446 all_32_bit_stats->igu_rx_no_sof_pkt +=
a90bb9a5 447 le16_to_cpu(*desc_data_16bit);
46a3df9f 448
a90bb9a5 449 desc_data = &desc[i].data[2];
450 n = HCLGE_32_BIT_RTN_DATANUM - 4;
451 } else {
a90bb9a5 452 desc_data = (__le32 *)&desc[i];
453 n = HCLGE_32_BIT_RTN_DATANUM;
454 }
455 for (k = 0; k < n; k++) {
a90bb9a5 456 *data++ += le32_to_cpu(*desc_data);
457 desc_data++;
458 }
459 }
460
461 return 0;
462}
463
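/* hclge_mac_update_stats: read the MAC counters with the
 * HCLGE_OPC_STATS_MAC command (17 descriptors) and accumulate them
 * into hdev->hw_stats.mac_stats. The first descriptor returns two
 * data words less than the others.
 */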
464static int hclge_mac_update_stats(struct hclge_dev *hdev)
465{
466#define HCLGE_MAC_CMD_NUM 17
467#define HCLGE_RTN_DATA_NUM 4
468
469 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
470 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
a90bb9a5 471 __le64 *desc_data;
472 int i, k, n;
473 int ret;
474
475 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
476 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
477 if (ret) {
478 dev_err(&hdev->pdev->dev,
479 "Get MAC pkt stats fail, status = %d.\n", ret);
480
481 return ret;
482 }
483
484 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
485 if (unlikely(i == 0)) {
a90bb9a5 486 desc_data = (__le64 *)(&desc[i].data[0]);
487 n = HCLGE_RTN_DATA_NUM - 2;
488 } else {
a90bb9a5 489 desc_data = (__le64 *)(&desc[i]);
490 n = HCLGE_RTN_DATA_NUM;
491 }
492 for (k = 0; k < n; k++) {
a90bb9a5 493 *data++ += le64_to_cpu(*desc_data);
494 desc_data++;
495 }
496 }
497
498 return 0;
499}
500
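/* hclge_tqps_update_stats: query the received/sent packet counters of
 * every TQP of this handle (one HCLGE_OPC_QUERY_RX_STATUS and one
 * HCLGE_OPC_QUERY_TX_STATUS command per queue) and accumulate them
 * into the per queue tqp_stats.
 */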
501static int hclge_tqps_update_stats(struct hnae3_handle *handle)
502{
503 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504 struct hclge_vport *vport = hclge_get_vport(handle);
505 struct hclge_dev *hdev = vport->back;
506 struct hnae3_queue *queue;
507 struct hclge_desc desc[1];
508 struct hclge_tqp *tqp;
509 int ret, i;
510
511 for (i = 0; i < kinfo->num_tqps; i++) {
512 queue = handle->kinfo.tqp[i];
513 tqp = container_of(queue, struct hclge_tqp, q);
 514 /* command : HCLGE_OPC_QUERY_RX_STATUS */
515 hclge_cmd_setup_basic_desc(&desc[0],
516 HCLGE_OPC_QUERY_RX_STATUS,
517 true);
518
a90bb9a5 519 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
520 ret = hclge_cmd_send(&hdev->hw, desc, 1);
521 if (ret) {
522 dev_err(&hdev->pdev->dev,
523 "Query tqp stat fail, status = %d,queue = %d\n",
524 ret, i);
525 return ret;
526 }
527 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
a90bb9a5 528 le32_to_cpu(desc[0].data[4]);
529 }
530
531 for (i = 0; i < kinfo->num_tqps; i++) {
532 queue = handle->kinfo.tqp[i];
533 tqp = container_of(queue, struct hclge_tqp, q);
 534 /* command : HCLGE_OPC_QUERY_TX_STATUS */
535 hclge_cmd_setup_basic_desc(&desc[0],
536 HCLGE_OPC_QUERY_TX_STATUS,
537 true);
538
a90bb9a5 539 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
540 ret = hclge_cmd_send(&hdev->hw, desc, 1);
541 if (ret) {
542 dev_err(&hdev->pdev->dev,
543 "Query tqp stat fail, status = %d,queue = %d\n",
544 ret, i);
545 return ret;
546 }
547 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
a90bb9a5 548 le32_to_cpu(desc[0].data[4]);
549 }
550
551 return 0;
552}
553
554static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
555{
556 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
557 struct hclge_tqp *tqp;
558 u64 *buff = data;
559 int i;
560
561 for (i = 0; i < kinfo->num_tqps; i++) {
562 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 563 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
564 }
565
566 for (i = 0; i < kinfo->num_tqps; i++) {
567 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
a90bb9a5 568 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
569 }
570
571 return buff;
572}
573
574static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
575{
576 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
577
578 return kinfo->num_tqps * (2);
579}
580
581static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
582{
583 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
584 u8 *buff = data;
585 int i = 0;
586
587 for (i = 0; i < kinfo->num_tqps; i++) {
588 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
589 struct hclge_tqp, q);
590 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
591 tqp->index);
592 buff = buff + ETH_GSTRING_LEN;
593 }
594
595 for (i = 0; i < kinfo->num_tqps; i++) {
596 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
597 struct hclge_tqp, q);
598 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
599 tqp->index);
600 buff = buff + ETH_GSTRING_LEN;
601 }
602
603 return buff;
604}
605
606static u64 *hclge_comm_get_stats(void *comm_stats,
607 const struct hclge_comm_stats_str strs[],
608 int size, u64 *data)
609{
610 u64 *buf = data;
611 u32 i;
612
613 for (i = 0; i < size; i++)
614 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
615
616 return buf + size;
617}
618
619static u8 *hclge_comm_get_strings(u32 stringset,
620 const struct hclge_comm_stats_str strs[],
621 int size, u8 *data)
622{
623 char *buff = (char *)data;
624 u32 i;
625
626 if (stringset != ETH_SS_STATS)
627 return buff;
628
629 for (i = 0; i < size; i++) {
630 snprintf(buff, ETH_GSTRING_LEN,
631 strs[i].desc);
632 buff = buff + ETH_GSTRING_LEN;
633 }
634
635 return (u8 *)buff;
636}
637
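/* hclge_update_netstat: fold the hardware MAC and 32 bit counters into
 * the generic net_device_stats fields (rx_dropped, rx_errors,
 * multicast, rx_crc_errors, rx_length_errors, rx_over_errors).
 */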
638static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
639 struct net_device_stats *net_stats)
640{
641 net_stats->tx_dropped = 0;
642 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
643 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
644 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
645
646 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
647 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
648 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
649 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
650 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
651 net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
652
653 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
654 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
655
656 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
657 net_stats->rx_length_errors =
658 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
659 net_stats->rx_length_errors +=
660 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
661 net_stats->rx_over_errors =
662 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
663}
664
665static void hclge_update_stats_for_all(struct hclge_dev *hdev)
666{
667 struct hnae3_handle *handle;
668 int status;
669
670 handle = &hdev->vport[0].nic;
671 if (handle->client) {
672 status = hclge_tqps_update_stats(handle);
673 if (status) {
674 dev_err(&hdev->pdev->dev,
675 "Update TQPS stats fail, status = %d.\n",
676 status);
677 }
678 }
679
680 status = hclge_mac_update_stats(hdev);
681 if (status)
682 dev_err(&hdev->pdev->dev,
683 "Update MAC stats fail, status = %d.\n", status);
684
685 status = hclge_32_bit_update_stats(hdev);
686 if (status)
687 dev_err(&hdev->pdev->dev,
688 "Update 32 bit stats fail, status = %d.\n",
689 status);
690
691 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
692}
693
694static void hclge_update_stats(struct hnae3_handle *handle,
695 struct net_device_stats *net_stats)
696{
697 struct hclge_vport *vport = hclge_get_vport(handle);
698 struct hclge_dev *hdev = vport->back;
699 struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
700 int status;
701
702 status = hclge_mac_update_stats(hdev);
703 if (status)
704 dev_err(&hdev->pdev->dev,
705 "Update MAC stats fail, status = %d.\n",
706 status);
707
708 status = hclge_32_bit_update_stats(hdev);
709 if (status)
710 dev_err(&hdev->pdev->dev,
711 "Update 32 bit stats fail, status = %d.\n",
712 status);
713
714 status = hclge_64_bit_update_stats(hdev);
715 if (status)
716 dev_err(&hdev->pdev->dev,
717 "Update 64 bit stats fail, status = %d.\n",
718 status);
719
720 status = hclge_tqps_update_stats(handle);
721 if (status)
722 dev_err(&hdev->pdev->dev,
723 "Update TQPS stats fail, status = %d.\n",
724 status);
725
726 hclge_update_netstat(hw_stats, net_stats);
727}
728
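/* hclge_get_sset_count: for ETH_SS_TEST report how many loopback
 * selftests are available and set the matching handle->flags bits;
 * for ETH_SS_STATS report the total number of statistics strings.
 */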
729static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
730{
731#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
732
733 struct hclge_vport *vport = hclge_get_vport(handle);
734 struct hclge_dev *hdev = vport->back;
735 int count = 0;
736
 737 /* Loopback test support rules:
 738 * mac: only supported in GE mode
 739 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
 740 * phy: only supported when a PHY device is present on the board
 741 */
742 if (stringset == ETH_SS_TEST) {
743 /* clear loopback bit flags at first */
744 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
745 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
747 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
748 count += 1;
749 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
750 } else {
751 count = -EOPNOTSUPP;
752 }
753 } else if (stringset == ETH_SS_STATS) {
754 count = ARRAY_SIZE(g_mac_stats_string) +
755 ARRAY_SIZE(g_all_32bit_stats_string) +
756 ARRAY_SIZE(g_all_64bit_stats_string) +
757 hclge_tqps_get_sset_count(handle, stringset);
758 }
759
760 return count;
761}
762
763static void hclge_get_strings(struct hnae3_handle *handle,
764 u32 stringset,
765 u8 *data)
766{
767 u8 *p = (char *)data;
768 int size;
769
770 if (stringset == ETH_SS_STATS) {
771 size = ARRAY_SIZE(g_mac_stats_string);
772 p = hclge_comm_get_strings(stringset,
773 g_mac_stats_string,
774 size,
775 p);
776 size = ARRAY_SIZE(g_all_32bit_stats_string);
777 p = hclge_comm_get_strings(stringset,
778 g_all_32bit_stats_string,
779 size,
780 p);
781 size = ARRAY_SIZE(g_all_64bit_stats_string);
782 p = hclge_comm_get_strings(stringset,
783 g_all_64bit_stats_string,
784 size,
785 p);
786 p = hclge_tqps_get_strings(handle, p);
787 } else if (stringset == ETH_SS_TEST) {
788 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
789 memcpy(p,
790 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
791 ETH_GSTRING_LEN);
792 p += ETH_GSTRING_LEN;
793 }
794 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
795 memcpy(p,
796 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
797 ETH_GSTRING_LEN);
798 p += ETH_GSTRING_LEN;
799 }
800 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
801 memcpy(p,
802 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
803 ETH_GSTRING_LEN);
804 p += ETH_GSTRING_LEN;
805 }
806 }
807}
808
809static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
810{
811 struct hclge_vport *vport = hclge_get_vport(handle);
812 struct hclge_dev *hdev = vport->back;
813 u64 *p;
814
815 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
816 g_mac_stats_string,
817 ARRAY_SIZE(g_mac_stats_string),
818 data);
819 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
820 g_all_32bit_stats_string,
821 ARRAY_SIZE(g_all_32bit_stats_string),
822 p);
823 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
824 g_all_64bit_stats_string,
825 ARRAY_SIZE(g_all_64bit_stats_string),
826 p);
827 p = hclge_tqps_get_stats(handle, p);
828}
829
830static int hclge_parse_func_status(struct hclge_dev *hdev,
d44f9b63 831 struct hclge_func_status_cmd *status)
832{
833 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834 return -EINVAL;
835
836 /* Set the pf to main pf */
837 if (status->pf_state & HCLGE_PF_STATE_MAIN)
838 hdev->flag |= HCLGE_FLAG_MAIN;
839 else
840 hdev->flag &= ~HCLGE_FLAG_MAIN;
841
842 return 0;
843}
844
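/* hclge_query_function_status: poll HCLGE_OPC_QUERY_FUNC_STATUS (up to
 * 5 tries with a short sleep in between) until pf_state is reported,
 * then record in hdev->flag whether this PF is the main PF.
 */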
845static int hclge_query_function_status(struct hclge_dev *hdev)
846{
d44f9b63 847 struct hclge_func_status_cmd *req;
848 struct hclge_desc desc;
849 int timeout = 0;
850 int ret;
851
852 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
d44f9b63 853 req = (struct hclge_func_status_cmd *)desc.data;
854
855 do {
856 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
857 if (ret) {
858 dev_err(&hdev->pdev->dev,
859 "query function status failed %d.\n",
860 ret);
861
862 return ret;
863 }
864
865 /* Check pf reset is done */
866 if (req->pf_state)
867 break;
868 usleep_range(1000, 2000);
869 } while (timeout++ < 5);
870
871 ret = hclge_parse_func_status(hdev, req);
872
873 return ret;
874}
875
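/* hclge_query_pf_resource: query the TQP number, packet buffer size
 * and MSI vector count assigned to this PF. When RoCE is supported the
 * NIC vectors come before the RoCE vectors, so num_msi is derived from
 * num_roce_msi plus HCLGE_ROCE_VECTOR_OFFSET.
 */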
876static int hclge_query_pf_resource(struct hclge_dev *hdev)
877{
d44f9b63 878 struct hclge_pf_res_cmd *req;
879 struct hclge_desc desc;
880 int ret;
881
882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
883 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
884 if (ret) {
885 dev_err(&hdev->pdev->dev,
886 "query pf resource failed %d.\n", ret);
887 return ret;
888 }
889
d44f9b63 890 req = (struct hclge_pf_res_cmd *)desc.data;
891 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
892 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
893
e92a0843 894 if (hnae3_dev_roce_supported(hdev)) {
887c3820 895 hdev->num_roce_msi =
896 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
897 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
898
899 /* PF should have NIC vectors and Roce vectors,
900 * NIC vectors are queued before Roce vectors.
901 */
887c3820 902 hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
903 } else {
904 hdev->num_msi =
905 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
906 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
907 }
908
909 return 0;
910}
911
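/* hclge_parse_speed: translate the speed code used in the firmware
 * configuration into a HCLGE_MAC_SPEED_* value; returns -EINVAL for an
 * unknown code.
 */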
912static int hclge_parse_speed(int speed_cmd, int *speed)
913{
914 switch (speed_cmd) {
915 case 6:
916 *speed = HCLGE_MAC_SPEED_10M;
917 break;
918 case 7:
919 *speed = HCLGE_MAC_SPEED_100M;
920 break;
921 case 0:
922 *speed = HCLGE_MAC_SPEED_1G;
923 break;
924 case 1:
925 *speed = HCLGE_MAC_SPEED_10G;
926 break;
927 case 2:
928 *speed = HCLGE_MAC_SPEED_25G;
929 break;
930 case 3:
931 *speed = HCLGE_MAC_SPEED_40G;
932 break;
933 case 4:
934 *speed = HCLGE_MAC_SPEED_50G;
935 break;
936 case 5:
937 *speed = HCLGE_MAC_SPEED_100G;
938 break;
939 default:
940 return -EINVAL;
941 }
942
943 return 0;
944}
945
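/* hclge_parse_cfg: unpack the raw HCLGE_OPC_GET_CFG_PARAM descriptors
 * into struct hclge_cfg: VMDq vport number, TC number, queue depth,
 * PHY address, media type, RX buffer length, MAC address, default
 * speed, maximum RSS size and NUMA node map.
 */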
946static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
947{
d44f9b63 948 struct hclge_cfg_param_cmd *req;
949 u64 mac_addr_tmp_high;
950 u64 mac_addr_tmp;
951 int i;
952
d44f9b63 953 req = (struct hclge_cfg_param_cmd *)desc[0].data;
954
955 /* get the configuration */
956 cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
957 HCLGE_CFG_VMDQ_M,
958 HCLGE_CFG_VMDQ_S);
959 cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
960 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
961 cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
962 HCLGE_CFG_TQP_DESC_N_M,
963 HCLGE_CFG_TQP_DESC_N_S);
964
965 cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
966 HCLGE_CFG_PHY_ADDR_M,
967 HCLGE_CFG_PHY_ADDR_S);
968 cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
969 HCLGE_CFG_MEDIA_TP_M,
970 HCLGE_CFG_MEDIA_TP_S);
971 cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
972 HCLGE_CFG_RX_BUF_LEN_M,
973 HCLGE_CFG_RX_BUF_LEN_S);
974 /* get mac_address */
975 mac_addr_tmp = __le32_to_cpu(req->param[2]);
976 mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
977 HCLGE_CFG_MAC_ADDR_H_M,
978 HCLGE_CFG_MAC_ADDR_H_S);
979
980 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
981
982 cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
983 HCLGE_CFG_DEFAULT_SPEED_M,
984 HCLGE_CFG_DEFAULT_SPEED_S);
985 cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
986 HCLGE_CFG_RSS_SIZE_M,
987 HCLGE_CFG_RSS_SIZE_S);
988
989 for (i = 0; i < ETH_ALEN; i++)
990 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
991
d44f9b63 992 req = (struct hclge_cfg_param_cmd *)desc[1].data;
993 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
994}
995
996/* hclge_get_cfg: query the static parameter from flash
997 * @hdev: pointer to struct hclge_dev
 998 * @hcfg: the config structure to be filled in
999 */
1000static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1001{
1002 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
d44f9b63 1003 struct hclge_cfg_param_cmd *req;
1004 int i, ret;
1005
1006 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1007 u32 offset = 0;
1008
d44f9b63 1009 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1010 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1011 true);
a90bb9a5 1012 hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
1013 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
 1014 /* Length is in units of 4 bytes when sent to hardware */
a90bb9a5 1015 hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
46a3df9f 1016 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
a90bb9a5 1017 req->offset = cpu_to_le32(offset);
1018 }
1019
1020 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1021 if (ret) {
1022 dev_err(&hdev->pdev->dev,
1023 "get config failed %d.\n", ret);
1024 return ret;
1025 }
1026
1027 hclge_parse_cfg(hcfg, desc);
1028 return 0;
1029}
1030
1031static int hclge_get_cap(struct hclge_dev *hdev)
1032{
1033 int ret;
1034
1035 ret = hclge_query_function_status(hdev);
1036 if (ret) {
1037 dev_err(&hdev->pdev->dev,
1038 "query function status error %d.\n", ret);
1039 return ret;
1040 }
1041
1042 /* get pf resource */
1043 ret = hclge_query_pf_resource(hdev);
1044 if (ret) {
1045 dev_err(&hdev->pdev->dev,
1046 "query pf resource error %d.\n", ret);
1047 return ret;
1048 }
1049
1050 return 0;
1051}
1052
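/* hclge_configure: read the static configuration via hclge_get_cfg()
 * and use it to initialize the device defaults: queue and buffer
 * sizes, MAC address, default speed, TC limits and the TX scheduler
 * mode. tc_max is clamped to a valid range and forced to 1 when the
 * device does not support DCB.
 */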
1053static int hclge_configure(struct hclge_dev *hdev)
1054{
1055 struct hclge_cfg cfg;
1056 int ret, i;
1057
1058 ret = hclge_get_cfg(hdev, &cfg);
1059 if (ret) {
1060 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1061 return ret;
1062 }
1063
1064 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1065 hdev->base_tqp_pid = 0;
c408e202 1066 hdev->rss_size_max = cfg.rss_size_max;
46a3df9f 1067 hdev->rx_buf_len = cfg.rx_buf_len;
fbbb1536 1068 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
46a3df9f 1069 hdev->hw.mac.media_type = cfg.media_type;
2a4776e1 1070 hdev->hw.mac.phy_addr = cfg.phy_addr;
1071 hdev->num_desc = cfg.tqp_desc_num;
1072 hdev->tm_info.num_pg = 1;
cacde272 1073 hdev->tc_max = cfg.tc_num;
1074 hdev->tm_info.hw_pfc_map = 0;
1075
1076 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1077 if (ret) {
1078 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1079 return ret;
1080 }
1081
1082 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1083 (hdev->tc_max < 1)) {
46a3df9f 1084 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1085 hdev->tc_max);
1086 hdev->tc_max = 1;
1087 }
1088
1089 /* Dev does not support DCB */
1090 if (!hnae3_dev_dcb_supported(hdev)) {
1091 hdev->tc_max = 1;
1092 hdev->pfc_max = 0;
1093 } else {
1094 hdev->pfc_max = hdev->tc_max;
1095 }
1096
1097 hdev->tm_info.num_tc = hdev->tc_max;
1098
46a3df9f 1099 /* Non-contiguous TCs are currently not supported */
cacde272 1100 for (i = 0; i < hdev->tm_info.num_tc; i++)
1101 hnae_set_bit(hdev->hw_tc_map, i, 1);
1102
1103 if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
1104 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1105 else
1106 hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
1107
1108 return ret;
1109}
1110
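/* hclge_config_tso: program the TSO MSS minimum and maximum into
 * hardware with the HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */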
1111static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1112 int tso_mss_max)
1113{
d44f9b63 1114 struct hclge_cfg_tso_status_cmd *req;
46a3df9f 1115 struct hclge_desc desc;
a90bb9a5 1116 u16 tso_mss;
1117
1118 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1119
d44f9b63 1120 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1121
1122 tso_mss = 0;
1123 hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
46a3df9f 1124 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1125 req->tso_mss_min = cpu_to_le16(tso_mss);
1126
1127 tso_mss = 0;
1128 hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
46a3df9f 1129 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
a90bb9a5 1130 req->tso_mss_max = cpu_to_le16(tso_mss);
1131
1132 return hclge_cmd_send(&hdev->hw, &desc, 1);
1133}
1134
1135static int hclge_alloc_tqps(struct hclge_dev *hdev)
1136{
1137 struct hclge_tqp *tqp;
1138 int i;
1139
1140 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1141 sizeof(struct hclge_tqp), GFP_KERNEL);
1142 if (!hdev->htqp)
1143 return -ENOMEM;
1144
1145 tqp = hdev->htqp;
1146
1147 for (i = 0; i < hdev->num_tqps; i++) {
1148 tqp->dev = &hdev->pdev->dev;
1149 tqp->index = i;
1150
1151 tqp->q.ae_algo = &ae_algo;
1152 tqp->q.buf_size = hdev->rx_buf_len;
1153 tqp->q.desc_num = hdev->num_desc;
1154 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1155 i * HCLGE_TQP_REG_SIZE;
1156
1157 tqp++;
1158 }
1159
1160 return 0;
1161}
1162
1163static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1164 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1165{
d44f9b63 1166 struct hclge_tqp_map_cmd *req;
1167 struct hclge_desc desc;
1168 int ret;
1169
1170 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1171
d44f9b63 1172 req = (struct hclge_tqp_map_cmd *)desc.data;
46a3df9f 1173 req->tqp_id = cpu_to_le16(tqp_pid);
a90bb9a5 1174 req->tqp_vf = func_id;
1175 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1176 1 << HCLGE_TQP_MAP_EN_B;
1177 req->tqp_vid = cpu_to_le16(tqp_vid);
1178
1179 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1180 if (ret) {
1181 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
1182 ret);
1183 return ret;
1184 }
1185
1186 return 0;
1187}
1188
1189static int hclge_assign_tqp(struct hclge_vport *vport,
1190 struct hnae3_queue **tqp, u16 num_tqps)
1191{
1192 struct hclge_dev *hdev = vport->back;
7df7dad6 1193 int i, alloced;
1194
1195 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1196 alloced < num_tqps; i++) {
1197 if (!hdev->htqp[i].alloced) {
1198 hdev->htqp[i].q.handle = &vport->nic;
1199 hdev->htqp[i].q.tqp_index = alloced;
1200 tqp[alloced] = &hdev->htqp[i].q;
1201 hdev->htqp[i].alloced = true;
1202 alloced++;
1203 }
1204 }
1205 vport->alloc_tqps = num_tqps;
1206
1207 return 0;
1208}
1209
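/* hclge_knic_setup: fill in the KNIC private info of a vport: derive
 * rss_size and the TQP count from the hardware limits, set up the per
 * TC queue offsets, and assign physical TQPs to the vport.
 */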
1210static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
1211{
1212 struct hnae3_handle *nic = &vport->nic;
1213 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1214 struct hclge_dev *hdev = vport->back;
1215 int i, ret;
1216
1217 kinfo->num_desc = hdev->num_desc;
1218 kinfo->rx_buf_len = hdev->rx_buf_len;
1219 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1220 kinfo->rss_size
1221 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1222 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1223
1224 for (i = 0; i < HNAE3_MAX_TC; i++) {
1225 if (hdev->hw_tc_map & BIT(i)) {
1226 kinfo->tc_info[i].enable = true;
1227 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1228 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1229 kinfo->tc_info[i].tc = i;
1230 } else {
 1231 /* Set to default queue if TC is disabled */
1232 kinfo->tc_info[i].enable = false;
1233 kinfo->tc_info[i].tqp_offset = 0;
1234 kinfo->tc_info[i].tqp_count = 1;
1235 kinfo->tc_info[i].tc = 0;
1236 }
1237 }
1238
1239 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1240 sizeof(struct hnae3_queue *), GFP_KERNEL);
1241 if (!kinfo->tqp)
1242 return -ENOMEM;
1243
1244 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
1245 if (ret) {
1246 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1247 return -EINVAL;
1248 }
1249
1250 return 0;
1251}
1252
1253static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1254 struct hclge_vport *vport)
1255{
1256 struct hnae3_handle *nic = &vport->nic;
1257 struct hnae3_knic_private_info *kinfo;
1258 u16 i;
1259
1260 kinfo = &nic->kinfo;
1261 for (i = 0; i < kinfo->num_tqps; i++) {
1262 struct hclge_tqp *q =
1263 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1264 bool is_pf;
1265 int ret;
1266
1267 is_pf = !(vport->vport_id);
1268 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1269 i, is_pf);
1270 if (ret)
1271 return ret;
1272 }
1273
1274 return 0;
1275}
1276
1277static int hclge_map_tqp(struct hclge_dev *hdev)
1278{
1279 struct hclge_vport *vport = hdev->vport;
1280 u16 i, num_vport;
1281
1282 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1283 for (i = 0; i < num_vport; i++) {
1284 int ret;
1285
1286 ret = hclge_map_tqp_to_vport(hdev, vport);
1287 if (ret)
1288 return ret;
1289
1290 vport++;
1291 }
1292
1293 return 0;
1294}
1295
1296static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1297{
1298 /* this would be initialized later */
1299}
1300
1301static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1302{
1303 struct hnae3_handle *nic = &vport->nic;
1304 struct hclge_dev *hdev = vport->back;
1305 int ret;
1306
1307 nic->pdev = hdev->pdev;
1308 nic->ae_algo = &ae_algo;
1309 nic->numa_node_mask = hdev->numa_node_mask;
1310
1311 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1312 ret = hclge_knic_setup(vport, num_tqps);
1313 if (ret) {
1314 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1315 ret);
1316 return ret;
1317 }
1318 } else {
1319 hclge_unic_setup(vport, num_tqps);
1320 }
1321
1322 return 0;
1323}
1324
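/* hclge_alloc_vport: allocate one vport for the PF itself plus one per
 * VMDq vport and requested VF, split the TQPs between them (the main
 * vport takes the remainder), enable SR-IOV if VFs were requested, and
 * run hclge_vport_setup() on every vport.
 */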
1325static int hclge_alloc_vport(struct hclge_dev *hdev)
1326{
1327 struct pci_dev *pdev = hdev->pdev;
1328 struct hclge_vport *vport;
1329 u32 tqp_main_vport;
1330 u32 tqp_per_vport;
1331 int num_vport, i;
1332 int ret;
1333
1334 /* We need to alloc a vport for main NIC of PF */
1335 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1336
1337 if (hdev->num_tqps < num_vport)
1338 num_vport = hdev->num_tqps;
1339
1340 /* Alloc the same number of TQPs for every vport */
1341 tqp_per_vport = hdev->num_tqps / num_vport;
1342 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1343
1344 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1345 GFP_KERNEL);
1346 if (!vport)
1347 return -ENOMEM;
1348
1349 hdev->vport = vport;
1350 hdev->num_alloc_vport = num_vport;
1351
1352#ifdef CONFIG_PCI_IOV
1353 /* Enable SRIOV */
1354 if (hdev->num_req_vfs) {
1355 dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
1356 hdev->num_req_vfs);
1357 ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
1358 if (ret) {
1359 hdev->num_alloc_vfs = 0;
1360 dev_err(&pdev->dev, "SRIOV enable failed %d\n",
1361 ret);
1362 return ret;
1363 }
1364 }
1365 hdev->num_alloc_vfs = hdev->num_req_vfs;
1366#endif
1367
1368 for (i = 0; i < num_vport; i++) {
1369 vport->back = hdev;
1370 vport->vport_id = i;
1371
1372 if (i == 0)
1373 ret = hclge_vport_setup(vport, tqp_main_vport);
1374 else
1375 ret = hclge_vport_setup(vport, tqp_per_vport);
1376 if (ret) {
1377 dev_err(&pdev->dev,
1378 "vport setup failed for vport %d, %d\n",
1379 i, ret);
1380 return ret;
1381 }
1382
1383 vport++;
1384 }
1385
1386 return 0;
1387}
1388
1389static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1390 struct hclge_pkt_buf_alloc *buf_alloc)
1391{
1392/* TX buffer size is in units of 128 bytes */
1393#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1394#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
d44f9b63 1395 struct hclge_tx_buff_alloc_cmd *req;
1396 struct hclge_desc desc;
1397 int ret;
1398 u8 i;
1399
d44f9b63 1400 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1401
1402 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
9ffe79a9 1403 for (i = 0; i < HCLGE_TC_NUM; i++) {
acf61ecd 1404 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
9ffe79a9 1405
1406 req->tx_pkt_buff[i] =
1407 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1408 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
9ffe79a9 1409 }
1410
1411 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1412 if (ret) {
1413 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1414 ret);
1415 return ret;
1416 }
1417
1418 return 0;
1419}
1420
1421static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1422 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1423{
acf61ecd 1424 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1425
1426 if (ret) {
1427 dev_err(&hdev->pdev->dev,
1428 "tx buffer alloc failed %d\n", ret);
1429 return ret;
1430 }
1431
1432 return 0;
1433}
1434
1435static int hclge_get_tc_num(struct hclge_dev *hdev)
1436{
1437 int i, cnt = 0;
1438
1439 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1440 if (hdev->hw_tc_map & BIT(i))
1441 cnt++;
1442 return cnt;
1443}
1444
1445static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1446{
1447 int i, cnt = 0;
1448
1449 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1450 if (hdev->hw_tc_map & BIT(i) &&
1451 hdev->tm_info.hw_pfc_map & BIT(i))
1452 cnt++;
1453 return cnt;
1454}
1455
1456/* Get the number of pfc enabled TCs, which have private buffer */
1457static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1458 struct hclge_pkt_buf_alloc *buf_alloc)
1459{
1460 struct hclge_priv_buf *priv;
1461 int i, cnt = 0;
1462
1463 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1464 priv = &buf_alloc->priv_buf[i];
1465 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1466 priv->enable)
1467 cnt++;
1468 }
1469
1470 return cnt;
1471}
1472
1473/* Get the number of pfc disabled TCs, which have private buffer */
1474static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1475 struct hclge_pkt_buf_alloc *buf_alloc)
1476{
1477 struct hclge_priv_buf *priv;
1478 int i, cnt = 0;
1479
1480 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1481 priv = &buf_alloc->priv_buf[i];
1482 if (hdev->hw_tc_map & BIT(i) &&
1483 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1484 priv->enable)
1485 cnt++;
1486 }
1487
1488 return cnt;
1489}
1490
acf61ecd 1491static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1492{
1493 struct hclge_priv_buf *priv;
1494 u32 rx_priv = 0;
1495 int i;
1496
1497 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1498 priv = &buf_alloc->priv_buf[i];
1499 if (priv->enable)
1500 rx_priv += priv->buf_size;
1501 }
1502 return rx_priv;
1503}
1504
acf61ecd 1505static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1506{
1507 u32 i, total_tx_size = 0;
1508
1509 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
acf61ecd 1510 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1511
1512 return total_tx_size;
1513}
1514
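/* hclge_is_rx_buf_ok: check whether rx_all can hold the private RX
 * buffers plus the required shared buffer. If so, fill in the shared
 * buffer size and the per TC high/low thresholds and return true;
 * otherwise return false so the caller can shrink the private buffers
 * and try again.
 */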
1515static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1516 struct hclge_pkt_buf_alloc *buf_alloc,
1517 u32 rx_all)
1518{
1519 u32 shared_buf_min, shared_buf_tc, shared_std;
1520 int tc_num, pfc_enable_num;
1521 u32 shared_buf;
1522 u32 rx_priv;
1523 int i;
1524
1525 tc_num = hclge_get_tc_num(hdev);
1526 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1527
1528 if (hnae3_dev_dcb_supported(hdev))
1529 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1530 else
1531 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1532
1533 shared_buf_tc = pfc_enable_num * hdev->mps +
1534 (tc_num - pfc_enable_num) * hdev->mps / 2 +
1535 hdev->mps;
1536 shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1537
acf61ecd 1538 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1539 if (rx_all <= rx_priv + shared_std)
1540 return false;
1541
1542 shared_buf = rx_all - rx_priv;
1543 buf_alloc->s_buf.buf_size = shared_buf;
1544 buf_alloc->s_buf.self.high = shared_buf;
1545 buf_alloc->s_buf.self.low = 2 * hdev->mps;
1546
1547 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1548 if ((hdev->hw_tc_map & BIT(i)) &&
1549 (hdev->tm_info.hw_pfc_map & BIT(i))) {
1550 buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1551 buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
46a3df9f 1552 } else {
1553 buf_alloc->s_buf.tc_thrd[i].low = 0;
1554 buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1555 }
1556 }
1557
1558 return true;
1559}
1560
1561static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1562 struct hclge_pkt_buf_alloc *buf_alloc)
1563{
1564 u32 i, total_size;
1565
1566 total_size = hdev->pkt_buf_size;
1567
1568 /* alloc tx buffer for all enabled tc */
1569 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1570 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1571
1572 if (total_size < HCLGE_DEFAULT_TX_BUF)
1573 return -ENOMEM;
1574
1575 if (hdev->hw_tc_map & BIT(i))
1576 priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1577 else
1578 priv->tx_buf_size = 0;
1579
1580 total_size -= priv->tx_buf_size;
1581 }
1582
1583 return 0;
1584}
1585
1586/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1587 * @hdev: pointer to struct hclge_dev
acf61ecd 1588 * @buf_alloc: pointer to buffer calculation data
 1589 * @return: 0: calculation successful, negative: fail
1590 */
1591static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1592 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1593{
9ffe79a9 1594 u32 rx_all = hdev->pkt_buf_size;
1595 int no_pfc_priv_num, pfc_priv_num;
1596 struct hclge_priv_buf *priv;
1597 int i;
1598
acf61ecd 1599 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
9ffe79a9 1600
1601 /* When DCB is not supported, rx private
1602 * buffer is not allocated.
1603 */
1604 if (!hnae3_dev_dcb_supported(hdev)) {
acf61ecd 1605 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1606 return -ENOMEM;
1607
1608 return 0;
1609 }
1610
1611 /* step 1, try to alloc private buffer for all enabled tc */
1612 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1613 priv = &buf_alloc->priv_buf[i];
1614 if (hdev->hw_tc_map & BIT(i)) {
1615 priv->enable = 1;
1616 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1617 priv->wl.low = hdev->mps;
1618 priv->wl.high = priv->wl.low + hdev->mps;
1619 priv->buf_size = priv->wl.high +
1620 HCLGE_DEFAULT_DV;
1621 } else {
1622 priv->wl.low = 0;
1623 priv->wl.high = 2 * hdev->mps;
1624 priv->buf_size = priv->wl.high;
1625 }
1626 } else {
1627 priv->enable = 0;
1628 priv->wl.low = 0;
1629 priv->wl.high = 0;
1630 priv->buf_size = 0;
1631 }
1632 }
1633
acf61ecd 1634 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1635 return 0;
1636
1637 /* step 2, try to decrease the buffer size of
1638 * no pfc TC's private buffer
1639 */
1640 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1641 priv = &buf_alloc->priv_buf[i];
46a3df9f 1642
1643 priv->enable = 0;
1644 priv->wl.low = 0;
1645 priv->wl.high = 0;
1646 priv->buf_size = 0;
1647
1648 if (!(hdev->hw_tc_map & BIT(i)))
1649 continue;
1650
1651 priv->enable = 1;
1652
1653 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1654 priv->wl.low = 128;
1655 priv->wl.high = priv->wl.low + hdev->mps;
1656 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1657 } else {
1658 priv->wl.low = 0;
1659 priv->wl.high = hdev->mps;
1660 priv->buf_size = priv->wl.high;
1661 }
1662 }
1663
acf61ecd 1664 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1665 return 0;
1666
1667 /* step 3, try to reduce the number of pfc disabled TCs,
1668 * which have private buffer
1669 */
1670 /* get the total no pfc enable TC number, which have private buffer */
acf61ecd 1671 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1672
1673 /* let the last to be cleared first */
1674 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
acf61ecd 1675 priv = &buf_alloc->priv_buf[i];
1676
1677 if (hdev->hw_tc_map & BIT(i) &&
1678 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1679 /* Clear the no pfc TC private buffer */
1680 priv->wl.low = 0;
1681 priv->wl.high = 0;
1682 priv->buf_size = 0;
1683 priv->enable = 0;
1684 no_pfc_priv_num--;
1685 }
1686
acf61ecd 1687 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1688 no_pfc_priv_num == 0)
1689 break;
1690 }
1691
acf61ecd 1692 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1693 return 0;
1694
1695 /* step 4, try to reduce the number of pfc enabled TCs
1696 * which have private buffer.
1697 */
acf61ecd 1698 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1699
1700 /* let the last to be cleared first */
1701 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
acf61ecd 1702 priv = &buf_alloc->priv_buf[i];
1703
1704 if (hdev->hw_tc_map & BIT(i) &&
1705 hdev->tm_info.hw_pfc_map & BIT(i)) {
1706 /* Reduce the number of pfc TC with private buffer */
1707 priv->wl.low = 0;
1708 priv->enable = 0;
1709 priv->wl.high = 0;
1710 priv->buf_size = 0;
1711 pfc_priv_num--;
1712 }
1713
acf61ecd 1714 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1715 pfc_priv_num == 0)
1716 break;
1717 }
acf61ecd 1718 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1719 return 0;
1720
1721 return -ENOMEM;
1722}
1723
1724static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1725 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1726{
d44f9b63 1727 struct hclge_rx_priv_buff_cmd *req;
1728 struct hclge_desc desc;
1729 int ret;
1730 int i;
1731
1732 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
d44f9b63 1733 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
46a3df9f
S
1734
1735 /* Alloc private buffer TCs */
1736 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
acf61ecd 1737 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
46a3df9f
S
1738
1739 req->buf_num[i] =
1740 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1741 req->buf_num[i] |=
5bca3b94 1742 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1743 }
1744
b8c8bf47 1745 req->shared_buf =
acf61ecd 1746 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1747 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1748
1749 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1750 if (ret) {
1751 dev_err(&hdev->pdev->dev,
1752 "rx private buffer alloc cmd failed %d\n", ret);
1753 return ret;
1754 }
1755
1756 return 0;
1757}
1758
1759#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1760
1761static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1762 struct hclge_pkt_buf_alloc *buf_alloc)
1763{
1764 struct hclge_rx_priv_wl_buf *req;
1765 struct hclge_priv_buf *priv;
1766 struct hclge_desc desc[2];
1767 int i, j;
1768 int ret;
1769
1770 for (i = 0; i < 2; i++) {
1771 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1772 false);
1773 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1774
1775 /* The first descriptor set the NEXT bit to 1 */
1776 if (i == 0)
1777 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1778 else
1779 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1780
1781 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1782 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1783
1784 priv = &buf_alloc->priv_buf[idx];
1785 req->tc_wl[j].high =
1786 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1787 req->tc_wl[j].high |=
1788 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1789 HCLGE_RX_PRIV_EN_B);
1790 req->tc_wl[j].low =
1791 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1792 req->tc_wl[j].low |=
1793 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1794 HCLGE_RX_PRIV_EN_B);
1795 }
1796 }
1797
 1798	/* Send 2 descriptors at one time */
1799 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1800 if (ret) {
1801 dev_err(&hdev->pdev->dev,
1802 "rx private waterline config cmd failed %d\n",
1803 ret);
1804 return ret;
1805 }
1806 return 0;
1807}
1808
acf61ecd
YL
1809static int hclge_common_thrd_config(struct hclge_dev *hdev,
1810 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1811{
acf61ecd 1812 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
46a3df9f
S
1813 struct hclge_rx_com_thrd *req;
1814 struct hclge_desc desc[2];
1815 struct hclge_tc_thrd *tc;
1816 int i, j;
1817 int ret;
1818
1819 for (i = 0; i < 2; i++) {
1820 hclge_cmd_setup_basic_desc(&desc[i],
1821 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1822 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1823
 1824		/* The first descriptor sets the NEXT bit to 1 */
1825 if (i == 0)
1826 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1827 else
1828 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1829
1830 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1831 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1832
1833 req->com_thrd[j].high =
1834 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1835 req->com_thrd[j].high |=
1836 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1837 HCLGE_RX_PRIV_EN_B);
1838 req->com_thrd[j].low =
1839 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1840 req->com_thrd[j].low |=
1841 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1842 HCLGE_RX_PRIV_EN_B);
1843 }
1844 }
1845
1846 /* Send 2 descriptors at one time */
1847 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1848 if (ret) {
1849 dev_err(&hdev->pdev->dev,
1850 "common threshold config cmd failed %d\n", ret);
1851 return ret;
1852 }
1853 return 0;
1854}
1855
acf61ecd
YL
1856static int hclge_common_wl_config(struct hclge_dev *hdev,
1857 struct hclge_pkt_buf_alloc *buf_alloc)
46a3df9f 1858{
acf61ecd 1859 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
46a3df9f
S
1860 struct hclge_rx_com_wl *req;
1861 struct hclge_desc desc;
1862 int ret;
1863
1864 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1865
1866 req = (struct hclge_rx_com_wl *)desc.data;
1867 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1868 req->com_wl.high |=
1869 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1870 HCLGE_RX_PRIV_EN_B);
1871
1872 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1873 req->com_wl.low |=
1874 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1875 HCLGE_RX_PRIV_EN_B);
1876
1877 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1878 if (ret) {
1879 dev_err(&hdev->pdev->dev,
1880 "common waterline config cmd failed %d\n", ret);
1881 return ret;
1882 }
1883
1884 return 0;
1885}
1886
1887int hclge_buffer_alloc(struct hclge_dev *hdev)
1888{
acf61ecd 1889 struct hclge_pkt_buf_alloc *pkt_buf;
46a3df9f
S
1890 int ret;
1891
acf61ecd
YL
1892 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1893 if (!pkt_buf)
46a3df9f
S
1894 return -ENOMEM;
1895
acf61ecd 1896 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
9ffe79a9
YL
1897 if (ret) {
1898 dev_err(&hdev->pdev->dev,
1899 "could not calc tx buffer size for all TCs %d\n", ret);
acf61ecd 1900 goto out;
9ffe79a9
YL
1901 }
1902
acf61ecd 1903 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
46a3df9f
S
1904 if (ret) {
1905 dev_err(&hdev->pdev->dev,
1906 "could not alloc tx buffers %d\n", ret);
acf61ecd 1907 goto out;
46a3df9f
S
1908 }
1909
acf61ecd 1910 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
46a3df9f
S
1911 if (ret) {
1912 dev_err(&hdev->pdev->dev,
1913 "could not calc rx priv buffer size for all TCs %d\n",
1914 ret);
acf61ecd 1915 goto out;
46a3df9f
S
1916 }
1917
acf61ecd 1918 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
46a3df9f
S
1919 if (ret) {
1920 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1921 ret);
acf61ecd 1922 goto out;
46a3df9f
S
1923 }
1924
2daf4a65 1925 if (hnae3_dev_dcb_supported(hdev)) {
acf61ecd 1926 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2daf4a65
YL
1927 if (ret) {
1928 dev_err(&hdev->pdev->dev,
1929 "could not configure rx private waterline %d\n",
1930 ret);
acf61ecd 1931 goto out;
2daf4a65 1932 }
46a3df9f 1933
acf61ecd 1934 ret = hclge_common_thrd_config(hdev, pkt_buf);
2daf4a65
YL
1935 if (ret) {
1936 dev_err(&hdev->pdev->dev,
1937 "could not configure common threshold %d\n",
1938 ret);
acf61ecd 1939 goto out;
2daf4a65 1940 }
46a3df9f
S
1941 }
1942
acf61ecd
YL
1943 ret = hclge_common_wl_config(hdev, pkt_buf);
1944 if (ret)
46a3df9f
S
1945 dev_err(&hdev->pdev->dev,
1946 "could not configure common waterline %d\n", ret);
46a3df9f 1947
acf61ecd
YL
1948out:
1949 kfree(pkt_buf);
1950 return ret;
46a3df9f
S
1951}
1952
1953static int hclge_init_roce_base_info(struct hclge_vport *vport)
1954{
1955 struct hnae3_handle *roce = &vport->roce;
1956 struct hnae3_handle *nic = &vport->nic;
1957
887c3820 1958 roce->rinfo.num_vectors = vport->back->num_roce_msi;
46a3df9f
S
1959
1960 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1961 vport->back->num_msi_left == 0)
1962 return -EINVAL;
1963
1964 roce->rinfo.base_vector = vport->back->roce_base_vector;
1965
1966 roce->rinfo.netdev = nic->kinfo.netdev;
1967 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1968
1969 roce->pdev = nic->pdev;
1970 roce->ae_algo = nic->ae_algo;
1971 roce->numa_node_mask = nic->numa_node_mask;
1972
1973 return 0;
1974}
1975
887c3820 1976static int hclge_init_msi(struct hclge_dev *hdev)
46a3df9f
S
1977{
1978 struct pci_dev *pdev = hdev->pdev;
887c3820
SM
1979 int vectors;
1980 int i;
46a3df9f 1981
887c3820
SM
1982 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1983 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1984 if (vectors < 0) {
1985 dev_err(&pdev->dev,
1986 "failed(%d) to allocate MSI/MSI-X vectors\n",
1987 vectors);
1988 return vectors;
46a3df9f 1989 }
887c3820
SM
1990 if (vectors < hdev->num_msi)
1991 dev_warn(&hdev->pdev->dev,
1992 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1993 hdev->num_msi, vectors);
46a3df9f 1994
887c3820
SM
1995 hdev->num_msi = vectors;
1996 hdev->num_msi_left = vectors;
1997 hdev->base_msi_vector = pdev->irq;
46a3df9f
S
1998 hdev->roce_base_vector = hdev->base_msi_vector +
1999 HCLGE_ROCE_VECTOR_OFFSET;
2000
46a3df9f
S
2001 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2002 sizeof(u16), GFP_KERNEL);
887c3820
SM
2003 if (!hdev->vector_status) {
2004 pci_free_irq_vectors(pdev);
46a3df9f 2005 return -ENOMEM;
887c3820 2006 }
46a3df9f
S
2007
2008 for (i = 0; i < hdev->num_msi; i++)
2009 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2010
887c3820
SM
2011 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2012 sizeof(int), GFP_KERNEL);
2013 if (!hdev->vector_irq) {
2014 pci_free_irq_vectors(pdev);
2015 return -ENOMEM;
46a3df9f 2016 }
46a3df9f
S
2017
2018 return 0;
2019}
2020
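/* Only 10M and 100M links can run half duplex; all higher speeds are
 * forced to full duplex.
 */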
2021static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2022{
2023 struct hclge_mac *mac = &hdev->hw.mac;
2024
2025 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2026 mac->duplex = (u8)duplex;
2027 else
2028 mac->duplex = HCLGE_MAC_FULL;
2029
2030 mac->speed = speed;
2031}
2032
2033int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2034{
d44f9b63 2035 struct hclge_config_mac_speed_dup_cmd *req;
46a3df9f
S
2036 struct hclge_desc desc;
2037 int ret;
2038
d44f9b63 2039 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
46a3df9f
S
2040
2041 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2042
2043 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2044
2045 switch (speed) {
2046 case HCLGE_MAC_SPEED_10M:
2047 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2048 HCLGE_CFG_SPEED_S, 6);
2049 break;
2050 case HCLGE_MAC_SPEED_100M:
2051 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2052 HCLGE_CFG_SPEED_S, 7);
2053 break;
2054 case HCLGE_MAC_SPEED_1G:
2055 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2056 HCLGE_CFG_SPEED_S, 0);
2057 break;
2058 case HCLGE_MAC_SPEED_10G:
2059 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2060 HCLGE_CFG_SPEED_S, 1);
2061 break;
2062 case HCLGE_MAC_SPEED_25G:
2063 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2064 HCLGE_CFG_SPEED_S, 2);
2065 break;
2066 case HCLGE_MAC_SPEED_40G:
2067 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2068 HCLGE_CFG_SPEED_S, 3);
2069 break;
2070 case HCLGE_MAC_SPEED_50G:
2071 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2072 HCLGE_CFG_SPEED_S, 4);
2073 break;
2074 case HCLGE_MAC_SPEED_100G:
2075 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2076 HCLGE_CFG_SPEED_S, 5);
2077 break;
2078 default:
d7629e74 2079 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
46a3df9f
S
2080 return -EINVAL;
2081 }
2082
2083 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2084 1);
2085
2086 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2087 if (ret) {
2088 dev_err(&hdev->pdev->dev,
2089 "mac speed/duplex config cmd failed %d.\n", ret);
2090 return ret;
2091 }
2092
2093 hclge_check_speed_dup(hdev, duplex, speed);
2094
2095 return 0;
2096}
2097
2098static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2099 u8 duplex)
2100{
2101 struct hclge_vport *vport = hclge_get_vport(handle);
2102 struct hclge_dev *hdev = vport->back;
2103
2104 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2105}
2106
2107static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2108 u8 *duplex)
2109{
d44f9b63 2110 struct hclge_query_an_speed_dup_cmd *req;
46a3df9f
S
2111 struct hclge_desc desc;
2112 int speed_tmp;
2113 int ret;
2114
d44f9b63 2115 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
46a3df9f
S
2116
2117 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2118 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2119 if (ret) {
2120 dev_err(&hdev->pdev->dev,
2121 "mac speed/autoneg/duplex query cmd failed %d\n",
2122 ret);
2123 return ret;
2124 }
2125
2126 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2127 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2128 HCLGE_QUERY_SPEED_S);
2129
2130 ret = hclge_parse_speed(speed_tmp, speed);
2131 if (ret) {
2132 dev_err(&hdev->pdev->dev,
2133 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2134 return -EIO;
2135 }
2136
2137 return 0;
2138}
2139
2140static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2141{
2142 struct hclge_mac *mac = &hdev->hw.mac;
d44f9b63 2143 struct hclge_query_an_speed_dup_cmd *req;
46a3df9f
S
2144 struct hclge_desc desc;
2145 int ret;
2146
d44f9b63 2147 req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
46a3df9f
S
2148
2149 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2150 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2151 if (ret) {
2152 dev_err(&hdev->pdev->dev,
2153 "autoneg result query cmd failed %d.\n", ret);
2154 return ret;
2155 }
2156
2157 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2158
2159 return 0;
2160}
2161
2162static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2163{
d44f9b63 2164 struct hclge_config_auto_neg_cmd *req;
46a3df9f 2165 struct hclge_desc desc;
a90bb9a5 2166 u32 flag = 0;
46a3df9f
S
2167 int ret;
2168
2169 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2170
d44f9b63 2171 req = (struct hclge_config_auto_neg_cmd *)desc.data;
a90bb9a5
YL
2172 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2173 req->cfg_an_cmd_flag = cpu_to_le32(flag);
46a3df9f
S
2174
2175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2176 if (ret) {
2177 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2178 ret);
2179 return ret;
2180 }
2181
2182 return 0;
2183}
2184
2185static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2186{
2187 struct hclge_vport *vport = hclge_get_vport(handle);
2188 struct hclge_dev *hdev = vport->back;
2189
2190 return hclge_set_autoneg_en(hdev, enable);
2191}
2192
2193static int hclge_get_autoneg(struct hnae3_handle *handle)
2194{
2195 struct hclge_vport *vport = hclge_get_vport(handle);
2196 struct hclge_dev *hdev = vport->back;
2197
2198 hclge_query_autoneg_result(hdev);
2199
2200 return hdev->hw.mac.autoneg;
2201}
2202
2203static int hclge_mac_init(struct hclge_dev *hdev)
2204{
2205 struct hclge_mac *mac = &hdev->hw.mac;
2206 int ret;
2207
2208 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2209 if (ret) {
2210 dev_err(&hdev->pdev->dev,
2211 "Config mac speed dup fail ret=%d\n", ret);
2212 return ret;
2213 }
2214
2215 mac->link = 0;
2216
46a3df9f
S
2217 /* Initialize the MTA table work mode */
2218 hdev->accept_mta_mc = true;
2219 hdev->enable_mta = true;
2220 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2221
2222 ret = hclge_set_mta_filter_mode(hdev,
2223 hdev->mta_mac_sel_type,
2224 hdev->enable_mta);
2225 if (ret) {
2226 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2227 ret);
2228 return ret;
2229 }
2230
2231 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2232}
2233
22fd3468
SM
2234static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2235{
2236 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2237 schedule_work(&hdev->mbx_service_task);
2238}
2239
ed4a1bb8
SM
2240static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2241{
2242 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2243 schedule_work(&hdev->rst_service_task);
2244}
2245
46a3df9f
S
2246static void hclge_task_schedule(struct hclge_dev *hdev)
2247{
2248 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2249 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2250 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2251 (void)schedule_work(&hdev->service_task);
2252}
2253
2254static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2255{
d44f9b63 2256 struct hclge_link_status_cmd *req;
46a3df9f
S
2257 struct hclge_desc desc;
2258 int link_status;
2259 int ret;
2260
2261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2263 if (ret) {
2264 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2265 ret);
2266 return ret;
2267 }
2268
d44f9b63 2269 req = (struct hclge_link_status_cmd *)desc.data;
46a3df9f
S
2270 link_status = req->status & HCLGE_LINK_STATUS;
2271
2272 return !!link_status;
2273}
2274
2275static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2276{
2277 int mac_state;
2278 int link_stat;
2279
2280 mac_state = hclge_get_mac_link_status(hdev);
2281
2282 if (hdev->hw.mac.phydev) {
2283 if (!genphy_read_status(hdev->hw.mac.phydev))
2284 link_stat = mac_state &
2285 hdev->hw.mac.phydev->link;
2286 else
2287 link_stat = 0;
2288
2289 } else {
2290 link_stat = mac_state;
2291 }
2292
2293 return !!link_stat;
2294}
2295
2296static void hclge_update_link_status(struct hclge_dev *hdev)
2297{
2298 struct hnae3_client *client = hdev->nic_client;
2299 struct hnae3_handle *handle;
2300 int state;
2301 int i;
2302
2303 if (!client)
2304 return;
2305 state = hclge_get_mac_phy_link(hdev);
2306 if (state != hdev->hw.mac.link) {
2307 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2308 handle = &hdev->vport[i].nic;
2309 client->ops->link_status_change(handle, state);
2310 }
2311 hdev->hw.mac.link = state;
2312 }
2313}
2314
2315static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2316{
2317 struct hclge_mac mac = hdev->hw.mac;
2318 u8 duplex;
2319 int speed;
2320 int ret;
2321
 2322	/* get the speed and duplex as autoneg result from mac cmd when phy
 2323	 * doesn't exist.
2324 */
c040366b 2325 if (mac.phydev || !mac.autoneg)
46a3df9f
S
2326 return 0;
2327
2328 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2329 if (ret) {
2330 dev_err(&hdev->pdev->dev,
2331 "mac autoneg/speed/duplex query failed %d\n", ret);
2332 return ret;
2333 }
2334
2335 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2336 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2337 if (ret) {
2338 dev_err(&hdev->pdev->dev,
2339 "mac speed/duplex config failed %d\n", ret);
2340 return ret;
2341 }
2342 }
2343
2344 return 0;
2345}
2346
2347static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2348{
2349 struct hclge_vport *vport = hclge_get_vport(handle);
2350 struct hclge_dev *hdev = vport->back;
2351
2352 return hclge_update_speed_duplex(hdev);
2353}
2354
2355static int hclge_get_status(struct hnae3_handle *handle)
2356{
2357 struct hclge_vport *vport = hclge_get_vport(handle);
2358 struct hclge_dev *hdev = vport->back;
2359
2360 hclge_update_link_status(hdev);
2361
2362 return hdev->hw.mac.link;
2363}
2364
d039ef68 2365static void hclge_service_timer(struct timer_list *t)
46a3df9f 2366{
d039ef68 2367 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
46a3df9f 2368
d039ef68 2369 mod_timer(&hdev->service_timer, jiffies + HZ);
46a3df9f
S
2370 hclge_task_schedule(hdev);
2371}
2372
2373static void hclge_service_complete(struct hclge_dev *hdev)
2374{
2375 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2376
2377 /* Flush memory before next watchdog */
2378 smp_mb__before_atomic();
2379 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2380}
2381
202f2014
SM
2382static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2383{
2384 u32 rst_src_reg;
22fd3468 2385 u32 cmdq_src_reg;
202f2014
SM
2386
2387 /* fetch the events from their corresponding regs */
2388 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
22fd3468
SM
2389 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2390
2391 /* Assumption: If by any chance reset and mailbox events are reported
 2392	 * together, then we will only process the reset event in this pass and
 2393	 * will defer the processing of the mailbox events. Since we would not
 2394	 * have cleared the RX CMDQ event this time, we would receive another
 2395	 * interrupt from H/W just for the mailbox.
2396 */
202f2014
SM
2397
2398 /* check for vector0 reset event sources */
2399 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2400 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2401 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2402 return HCLGE_VECTOR0_EVENT_RST;
2403 }
2404
2405 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2406 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2407 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2408 return HCLGE_VECTOR0_EVENT_RST;
2409 }
2410
2411 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2412 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2413 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2414 return HCLGE_VECTOR0_EVENT_RST;
2415 }
2416
22fd3468
SM
2417 /* check for vector0 mailbox(=CMDQ RX) event source */
2418 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2419 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2420 *clearval = cmdq_src_reg;
2421 return HCLGE_VECTOR0_EVENT_MBX;
2422 }
202f2014
SM
2423
2424 return HCLGE_VECTOR0_EVENT_OTHER;
2425}
2426
2427static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2428 u32 regclr)
2429{
22fd3468
SM
2430 switch (event_type) {
2431 case HCLGE_VECTOR0_EVENT_RST:
202f2014 2432 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
22fd3468
SM
2433 break;
2434 case HCLGE_VECTOR0_EVENT_MBX:
2435 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2436 break;
2437 }
202f2014
SM
2438}
2439
466b0c00
L
2440static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2441{
2442 writel(enable ? 1 : 0, vector->addr);
2443}
2444
2445static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2446{
2447 struct hclge_dev *hdev = data;
202f2014
SM
2448 u32 event_cause;
2449 u32 clearval;
466b0c00
L
2450
2451 hclge_enable_vector(&hdev->misc_vector, false);
202f2014
SM
2452 event_cause = hclge_check_event_cause(hdev, &clearval);
2453
22fd3468 2454	/* vector 0 interrupt is shared with reset and mailbox source events. */
202f2014
SM
2455 switch (event_cause) {
2456 case HCLGE_VECTOR0_EVENT_RST:
ed4a1bb8 2457 hclge_reset_task_schedule(hdev);
202f2014 2458 break;
22fd3468
SM
2459 case HCLGE_VECTOR0_EVENT_MBX:
 2460		/* If we are here, then:
 2461		 * 1. either we are not handling any mbx task and none is
 2462		 * scheduled either,
 2463		 * OR
 2464		 * 2. we could be handling an mbx task but nothing more is
 2465		 * scheduled.
 2466		 * In both cases, we should schedule the mbx task, as there are more
 2467		 * mbx messages reported by this interrupt.
2468 */
2469 hclge_mbx_task_schedule(hdev);
2470
202f2014
SM
2471 default:
2472 dev_dbg(&hdev->pdev->dev,
2473 "received unknown or unhandled event of vector0\n");
2474 break;
2475 }
2476
2477 /* we should clear the source of interrupt */
2478 hclge_clear_event_cause(hdev, event_cause, clearval);
2479 hclge_enable_vector(&hdev->misc_vector, true);
466b0c00
L
2480
2481 return IRQ_HANDLED;
2482}
2483
2484static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2485{
2486 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2487 hdev->num_msi_left += 1;
2488 hdev->num_msi_used -= 1;
2489}
2490
2491static void hclge_get_misc_vector(struct hclge_dev *hdev)
2492{
2493 struct hclge_misc_vector *vector = &hdev->misc_vector;
2494
2495 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2496
2497 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2498 hdev->vector_status[0] = 0;
2499
2500 hdev->num_msi_left -= 1;
2501 hdev->num_msi_used += 1;
2502}
2503
2504static int hclge_misc_irq_init(struct hclge_dev *hdev)
2505{
2506 int ret;
2507
2508 hclge_get_misc_vector(hdev);
2509
202f2014
SM
2510 /* this would be explicitly freed in the end */
2511 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2512 0, "hclge_misc", hdev);
466b0c00
L
2513 if (ret) {
2514 hclge_free_vector(hdev, 0);
2515 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2516 hdev->misc_vector.vector_irq);
2517 }
2518
2519 return ret;
2520}
2521
202f2014
SM
2522static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2523{
2524 free_irq(hdev->misc_vector.vector_irq, hdev);
2525 hclge_free_vector(hdev, 0);
2526}
2527
4ed340ab
L
2528static int hclge_notify_client(struct hclge_dev *hdev,
2529 enum hnae3_reset_notify_type type)
2530{
2531 struct hnae3_client *client = hdev->nic_client;
2532 u16 i;
2533
2534 if (!client->ops->reset_notify)
2535 return -EOPNOTSUPP;
2536
2537 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2538 struct hnae3_handle *handle = &hdev->vport[i].nic;
2539 int ret;
2540
2541 ret = client->ops->reset_notify(handle, type);
2542 if (ret)
2543 return ret;
2544 }
2545
2546 return 0;
2547}
2548
2549static int hclge_reset_wait(struct hclge_dev *hdev)
2550{
2551#define HCLGE_RESET_WATI_MS 100
2552#define HCLGE_RESET_WAIT_CNT 5
2553 u32 val, reg, reg_bit;
2554 u32 cnt = 0;
2555
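	/* Poll the reset status bit for the requested reset type, up to
	 * HCLGE_RESET_WAIT_CNT times with HCLGE_RESET_WATI_MS between reads.
	 */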
2556 switch (hdev->reset_type) {
2557 case HNAE3_GLOBAL_RESET:
2558 reg = HCLGE_GLOBAL_RESET_REG;
2559 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2560 break;
2561 case HNAE3_CORE_RESET:
2562 reg = HCLGE_GLOBAL_RESET_REG;
2563 reg_bit = HCLGE_CORE_RESET_BIT;
2564 break;
2565 case HNAE3_FUNC_RESET:
2566 reg = HCLGE_FUN_RST_ING;
2567 reg_bit = HCLGE_FUN_RST_ING_B;
2568 break;
2569 default:
2570 dev_err(&hdev->pdev->dev,
2571 "Wait for unsupported reset type: %d\n",
2572 hdev->reset_type);
2573 return -EINVAL;
2574 }
2575
2576 val = hclge_read_dev(&hdev->hw, reg);
2577 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2578 msleep(HCLGE_RESET_WATI_MS);
2579 val = hclge_read_dev(&hdev->hw, reg);
2580 cnt++;
2581 }
2582
4ed340ab
L
2583 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2584 dev_warn(&hdev->pdev->dev,
2585 "Wait for reset timeout: %d\n", hdev->reset_type);
2586 return -EBUSY;
2587 }
2588
2589 return 0;
2590}
2591
2592static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2593{
2594 struct hclge_desc desc;
2595 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2596 int ret;
2597
2598 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2599 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
2600 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2601 req->fun_reset_vfid = func_id;
2602
2603 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2604 if (ret)
2605 dev_err(&hdev->pdev->dev,
2606 "send function reset cmd fail, status =%d\n", ret);
2607
2608 return ret;
2609}
2610
d5752031 2611static void hclge_do_reset(struct hclge_dev *hdev)
4ed340ab
L
2612{
2613 struct pci_dev *pdev = hdev->pdev;
2614 u32 val;
2615
d5752031 2616 switch (hdev->reset_type) {
4ed340ab
L
2617 case HNAE3_GLOBAL_RESET:
2618 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2619 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2620 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2621 dev_info(&pdev->dev, "Global Reset requested\n");
2622 break;
2623 case HNAE3_CORE_RESET:
2624 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2625 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2626 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2627 dev_info(&pdev->dev, "Core Reset requested\n");
2628 break;
2629 case HNAE3_FUNC_RESET:
2630 dev_info(&pdev->dev, "PF Reset requested\n");
2631 hclge_func_reset_cmd(hdev, 0);
ed4a1bb8
SM
2632 /* schedule again to check later */
2633 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2634 hclge_reset_task_schedule(hdev);
4ed340ab
L
2635 break;
2636 default:
2637 dev_warn(&pdev->dev,
d5752031 2638 "Unsupported reset type: %d\n", hdev->reset_type);
4ed340ab
L
2639 break;
2640 }
2641}
2642
d5752031
SM
2643static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2644 unsigned long *addr)
2645{
2646 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2647
2648 /* return the highest priority reset level amongst all */
2649 if (test_bit(HNAE3_GLOBAL_RESET, addr))
2650 rst_level = HNAE3_GLOBAL_RESET;
2651 else if (test_bit(HNAE3_CORE_RESET, addr))
2652 rst_level = HNAE3_CORE_RESET;
2653 else if (test_bit(HNAE3_IMP_RESET, addr))
2654 rst_level = HNAE3_IMP_RESET;
2655 else if (test_bit(HNAE3_FUNC_RESET, addr))
2656 rst_level = HNAE3_FUNC_RESET;
2657
2658 /* now, clear all other resets */
2659 clear_bit(HNAE3_GLOBAL_RESET, addr);
2660 clear_bit(HNAE3_CORE_RESET, addr);
2661 clear_bit(HNAE3_IMP_RESET, addr);
2662 clear_bit(HNAE3_FUNC_RESET, addr);
2663
2664 return rst_level;
2665}
2666
2667static void hclge_reset(struct hclge_dev *hdev)
2668{
2669 /* perform reset of the stack & ae device for a client */
2670
2671 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2672
2673 if (!hclge_reset_wait(hdev)) {
2674 rtnl_lock();
2675 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2676 hclge_reset_ae_dev(hdev->ae_dev);
2677 hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2678 rtnl_unlock();
2679 } else {
2680 /* schedule again to check pending resets later */
2681 set_bit(hdev->reset_type, &hdev->reset_pending);
2682 hclge_reset_task_schedule(hdev);
2683 }
2684
2685 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2686}
2687
4ed340ab
L
2688static void hclge_reset_event(struct hnae3_handle *handle,
2689 enum hnae3_reset_type reset)
2690{
2691 struct hclge_vport *vport = hclge_get_vport(handle);
2692 struct hclge_dev *hdev = vport->back;
2693
2694 dev_info(&hdev->pdev->dev,
 2695		 "Received reset event, reset_type is %d", reset);
2696
2697 switch (reset) {
2698 case HNAE3_FUNC_RESET:
2699 case HNAE3_CORE_RESET:
2700 case HNAE3_GLOBAL_RESET:
ed4a1bb8
SM
2701 /* request reset & schedule reset task */
2702 set_bit(reset, &hdev->reset_request);
2703 hclge_reset_task_schedule(hdev);
4ed340ab
L
2704 break;
2705 default:
2706 dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
2707 break;
2708 }
2709}
2710
2711static void hclge_reset_subtask(struct hclge_dev *hdev)
2712{
d5752031
SM
2713 /* check if there is any ongoing reset in the hardware. This status can
 2714	 * be checked from reset_pending. If there is, then we need to wait for
 2715	 * the hardware to complete the reset.
 2716	 * a. If we are able to figure out in reasonable time that the hardware
 2717	 * has been fully reset, then we can proceed with the driver and client
 2718	 * reset.
 2719	 * b. else, we can come back later to check this status, so re-schedule
 2720	 * now.
2721 */
2722 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2723 if (hdev->reset_type != HNAE3_NONE_RESET)
2724 hclge_reset(hdev);
4ed340ab 2725
d5752031
SM
2726 /* check if we got any *new* reset requests to be honored */
2727 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2728 if (hdev->reset_type != HNAE3_NONE_RESET)
2729 hclge_do_reset(hdev);
4ed340ab 2730
4ed340ab
L
2731 hdev->reset_type = HNAE3_NONE_RESET;
2732}
2733
ed4a1bb8 2734static void hclge_reset_service_task(struct work_struct *work)
466b0c00 2735{
ed4a1bb8
SM
2736 struct hclge_dev *hdev =
2737 container_of(work, struct hclge_dev, rst_service_task);
2738
2739 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2740 return;
2741
2742 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2743
4ed340ab 2744 hclge_reset_subtask(hdev);
ed4a1bb8
SM
2745
2746 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
466b0c00
L
2747}
2748
22fd3468
SM
2749static void hclge_mailbox_service_task(struct work_struct *work)
2750{
2751 struct hclge_dev *hdev =
2752 container_of(work, struct hclge_dev, mbx_service_task);
2753
2754 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2755 return;
2756
2757 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2758
2759 hclge_mbx_handler(hdev);
2760
2761 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2762}
2763
46a3df9f
S
2764static void hclge_service_task(struct work_struct *work)
2765{
2766 struct hclge_dev *hdev =
2767 container_of(work, struct hclge_dev, service_task);
2768
2769 hclge_update_speed_duplex(hdev);
2770 hclge_update_link_status(hdev);
2771 hclge_update_stats_for_all(hdev);
2772 hclge_service_complete(hdev);
2773}
2774
2775static void hclge_disable_sriov(struct hclge_dev *hdev)
2776{
2a32ca13
AB
2777 /* If our VFs are assigned we cannot shut down SR-IOV
2778 * without causing issues, so just leave the hardware
2779 * available but disabled
2780 */
2781 if (pci_vfs_assigned(hdev->pdev)) {
2782 dev_warn(&hdev->pdev->dev,
2783 "disabling driver while VFs are assigned\n");
2784 return;
2785 }
46a3df9f 2786
2a32ca13 2787 pci_disable_sriov(hdev->pdev);
46a3df9f
S
2788}
2789
2790struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2791{
2792 /* VF handle has no client */
2793 if (!handle->client)
2794 return container_of(handle, struct hclge_vport, nic);
2795 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2796 return container_of(handle, struct hclge_vport, roce);
2797 else
2798 return container_of(handle, struct hclge_vport, nic);
2799}
2800
2801static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2802 struct hnae3_vector_info *vector_info)
2803{
2804 struct hclge_vport *vport = hclge_get_vport(handle);
2805 struct hnae3_vector_info *vector = vector_info;
2806 struct hclge_dev *hdev = vport->back;
2807 int alloc = 0;
2808 int i, j;
2809
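	/* MSI-X entry 0 is reserved for the misc vector, so ring vectors are
	 * allocated starting from index 1.
	 */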
2810 vector_num = min(hdev->num_msi_left, vector_num);
2811
2812 for (j = 0; j < vector_num; j++) {
2813 for (i = 1; i < hdev->num_msi; i++) {
2814 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2815 vector->vector = pci_irq_vector(hdev->pdev, i);
2816 vector->io_addr = hdev->hw.io_base +
2817 HCLGE_VECTOR_REG_BASE +
2818 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2819 vport->vport_id *
2820 HCLGE_VECTOR_VF_OFFSET;
2821 hdev->vector_status[i] = vport->vport_id;
887c3820 2822 hdev->vector_irq[i] = vector->vector;
46a3df9f
S
2823
2824 vector++;
2825 alloc++;
2826
2827 break;
2828 }
2829 }
2830 }
2831 hdev->num_msi_left -= alloc;
2832 hdev->num_msi_used += alloc;
2833
2834 return alloc;
2835}
2836
2837static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2838{
2839 int i;
2840
887c3820
SM
2841 for (i = 0; i < hdev->num_msi; i++)
2842 if (vector == hdev->vector_irq[i])
2843 return i;
2844
46a3df9f
S
2845 return -EINVAL;
2846}
2847
2848static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2849{
2850 return HCLGE_RSS_KEY_SIZE;
2851}
2852
2853static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2854{
2855 return HCLGE_RSS_IND_TBL_SIZE;
2856}
2857
2858static int hclge_get_rss_algo(struct hclge_dev *hdev)
2859{
d44f9b63 2860 struct hclge_rss_config_cmd *req;
46a3df9f
S
2861 struct hclge_desc desc;
2862 int rss_hash_algo;
2863 int ret;
2864
2865 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2866
2867 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2868 if (ret) {
2869 dev_err(&hdev->pdev->dev,
 2870			"Get rss algo config fail, status = %d\n", ret);
2871 return ret;
2872 }
2873
d44f9b63 2874 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f
S
2875 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2876
2877 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2878 return ETH_RSS_HASH_TOP;
2879
2880 return -EINVAL;
2881}
2882
2883static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2884 const u8 hfunc, const u8 *key)
2885{
d44f9b63 2886 struct hclge_rss_config_cmd *req;
46a3df9f
S
2887 struct hclge_desc desc;
2888 int key_offset;
2889 int key_size;
2890 int ret;
2891
d44f9b63 2892 req = (struct hclge_rss_config_cmd *)desc.data;
46a3df9f
S
2893
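	/* The key is written in HCLGE_RSS_HASH_KEY_NUM-byte chunks over three
	 * commands; the last chunk carries the remainder of the
	 * HCLGE_RSS_KEY_SIZE-byte key.
	 */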
2894 for (key_offset = 0; key_offset < 3; key_offset++) {
2895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2896 false);
2897
2898 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2899 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2900
2901 if (key_offset == 2)
2902 key_size =
2903 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2904 else
2905 key_size = HCLGE_RSS_HASH_KEY_NUM;
2906
2907 memcpy(req->hash_key,
2908 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2909
2910 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2911 if (ret) {
2912 dev_err(&hdev->pdev->dev,
2913 "Configure RSS config fail, status = %d\n",
2914 ret);
2915 return ret;
2916 }
2917 }
2918 return 0;
2919}
2920
2921static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2922{
d44f9b63 2923 struct hclge_rss_indirection_table_cmd *req;
46a3df9f
S
2924 struct hclge_desc desc;
2925 int i, j;
2926 int ret;
2927
d44f9b63 2928 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
46a3df9f
S
2929
2930 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2931 hclge_cmd_setup_basic_desc
2932 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2933
a90bb9a5
YL
2934 req->start_table_index =
2935 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2936 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
46a3df9f
S
2937
2938 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2939 req->rss_result[j] =
2940 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2941
2942 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2943 if (ret) {
2944 dev_err(&hdev->pdev->dev,
2945 "Configure rss indir table fail,status = %d\n",
2946 ret);
2947 return ret;
2948 }
2949 }
2950 return 0;
2951}
2952
2953static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2954 u16 *tc_size, u16 *tc_offset)
2955{
d44f9b63 2956 struct hclge_rss_tc_mode_cmd *req;
46a3df9f
S
2957 struct hclge_desc desc;
2958 int ret;
2959 int i;
2960
2961 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
d44f9b63 2962 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
46a3df9f
S
2963
2964 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
a90bb9a5
YL
2965 u16 mode = 0;
2966
2967 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2968 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
46a3df9f 2969 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
a90bb9a5 2970 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
46a3df9f 2971 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
a90bb9a5
YL
2972
2973 req->rss_tc_mode[i] = cpu_to_le16(mode);
46a3df9f
S
2974 }
2975
2976 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2977 if (ret) {
2978 dev_err(&hdev->pdev->dev,
2979 "Configure rss tc mode fail, status = %d\n", ret);
2980 return ret;
2981 }
2982
2983 return 0;
2984}
2985
2986static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2987{
d44f9b63 2988 struct hclge_rss_input_tuple_cmd *req;
46a3df9f
S
2989 struct hclge_desc desc;
2990 int ret;
2991
2992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2993
d44f9b63 2994 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
46a3df9f
S
2995 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2996 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2997 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2998 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2999 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3000 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3001 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
3002 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3003 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3004 if (ret) {
3005 dev_err(&hdev->pdev->dev,
3006 "Configure rss input fail, status = %d\n", ret);
3007 return ret;
3008 }
3009
3010 return 0;
3011}
3012
3013static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3014 u8 *key, u8 *hfunc)
3015{
3016 struct hclge_vport *vport = hclge_get_vport(handle);
3017 struct hclge_dev *hdev = vport->back;
3018 int i;
3019
3020 /* Get hash algorithm */
3021 if (hfunc)
3022 *hfunc = hclge_get_rss_algo(hdev);
3023
3024 /* Get the RSS Key required by the user */
3025 if (key)
3026 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3027
3028 /* Get indirect table */
3029 if (indir)
3030 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3031 indir[i] = vport->rss_indirection_tbl[i];
3032
3033 return 0;
3034}
3035
3036static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3037 const u8 *key, const u8 hfunc)
3038{
3039 struct hclge_vport *vport = hclge_get_vport(handle);
3040 struct hclge_dev *hdev = vport->back;
3041 u8 hash_algo;
3042 int ret, i;
3043
 3044	/* Set the RSS Hash Key if specified by the user */
3045 if (key) {
 3046		/* Update the shadow RSS key with the user specified key */
3047 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3048
3049 if (hfunc == ETH_RSS_HASH_TOP ||
3050 hfunc == ETH_RSS_HASH_NO_CHANGE)
3051 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3052 else
3053 return -EINVAL;
3054 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3055 if (ret)
3056 return ret;
3057 }
3058
3059 /* Update the shadow RSS table with user specified qids */
3060 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3061 vport->rss_indirection_tbl[i] = indir[i];
3062
3063 /* Update the hardware */
3064 ret = hclge_set_rss_indir_table(hdev, indir);
3065 return ret;
3066}
3067
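/* Translate the ethtool RXH_* flags into the hardware tuple enable bits;
 * SCTP flows additionally set HCLGE_V_TAG_BIT.
 */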
f7db940a
L
3068static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3069{
3070 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3071
3072 if (nfc->data & RXH_L4_B_2_3)
3073 hash_sets |= HCLGE_D_PORT_BIT;
3074 else
3075 hash_sets &= ~HCLGE_D_PORT_BIT;
3076
3077 if (nfc->data & RXH_IP_SRC)
3078 hash_sets |= HCLGE_S_IP_BIT;
3079 else
3080 hash_sets &= ~HCLGE_S_IP_BIT;
3081
3082 if (nfc->data & RXH_IP_DST)
3083 hash_sets |= HCLGE_D_IP_BIT;
3084 else
3085 hash_sets &= ~HCLGE_D_IP_BIT;
3086
3087 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3088 hash_sets |= HCLGE_V_TAG_BIT;
3089
3090 return hash_sets;
3091}
3092
3093static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3094 struct ethtool_rxnfc *nfc)
3095{
3096 struct hclge_vport *vport = hclge_get_vport(handle);
3097 struct hclge_dev *hdev = vport->back;
3098 struct hclge_rss_input_tuple_cmd *req;
3099 struct hclge_desc desc;
3100 u8 tuple_sets;
3101 int ret;
3102
3103 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3104 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3105 return -EINVAL;
3106
3107 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3108 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
3109 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3110 if (ret) {
3111 dev_err(&hdev->pdev->dev,
3112 "Read rss tuple fail, status = %d\n", ret);
3113 return ret;
3114 }
3115
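	/* Reuse the descriptor as a write so the current tuple settings are
	 * kept and only the field for the selected flow type is updated below.
	 */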
3116 hclge_cmd_reuse_desc(&desc, false);
3117
3118 tuple_sets = hclge_get_rss_hash_bits(nfc);
3119 switch (nfc->flow_type) {
3120 case TCP_V4_FLOW:
3121 req->ipv4_tcp_en = tuple_sets;
3122 break;
3123 case TCP_V6_FLOW:
3124 req->ipv6_tcp_en = tuple_sets;
3125 break;
3126 case UDP_V4_FLOW:
3127 req->ipv4_udp_en = tuple_sets;
3128 break;
3129 case UDP_V6_FLOW:
3130 req->ipv6_udp_en = tuple_sets;
3131 break;
3132 case SCTP_V4_FLOW:
3133 req->ipv4_sctp_en = tuple_sets;
3134 break;
3135 case SCTP_V6_FLOW:
3136 if ((nfc->data & RXH_L4_B_0_1) ||
3137 (nfc->data & RXH_L4_B_2_3))
3138 return -EINVAL;
3139
3140 req->ipv6_sctp_en = tuple_sets;
3141 break;
3142 case IPV4_FLOW:
3143 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3144 break;
3145 case IPV6_FLOW:
3146 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3147 break;
3148 default:
3149 return -EINVAL;
3150 }
3151
3152 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3153 if (ret)
3154 dev_err(&hdev->pdev->dev,
3155 "Set rss tuple fail, status = %d\n", ret);
3156
3157 return ret;
3158}
3159
07d29954
L
3160static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3161 struct ethtool_rxnfc *nfc)
3162{
3163 struct hclge_vport *vport = hclge_get_vport(handle);
3164 struct hclge_dev *hdev = vport->back;
3165 struct hclge_rss_input_tuple_cmd *req;
3166 struct hclge_desc desc;
3167 u8 tuple_sets;
3168 int ret;
3169
3170 nfc->data = 0;
3171
3172 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3173 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
3174 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3175 if (ret) {
3176 dev_err(&hdev->pdev->dev,
3177 "Read rss tuple fail, status = %d\n", ret);
3178 return ret;
3179 }
3180
3181 switch (nfc->flow_type) {
3182 case TCP_V4_FLOW:
3183 tuple_sets = req->ipv4_tcp_en;
3184 break;
3185 case UDP_V4_FLOW:
3186 tuple_sets = req->ipv4_udp_en;
3187 break;
3188 case TCP_V6_FLOW:
3189 tuple_sets = req->ipv6_tcp_en;
3190 break;
3191 case UDP_V6_FLOW:
3192 tuple_sets = req->ipv6_udp_en;
3193 break;
3194 case SCTP_V4_FLOW:
3195 tuple_sets = req->ipv4_sctp_en;
3196 break;
3197 case SCTP_V6_FLOW:
3198 tuple_sets = req->ipv6_sctp_en;
3199 break;
3200 case IPV4_FLOW:
3201 case IPV6_FLOW:
3202 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3203 break;
3204 default:
3205 return -EINVAL;
3206 }
3207
3208 if (!tuple_sets)
3209 return 0;
3210
3211 if (tuple_sets & HCLGE_D_PORT_BIT)
3212 nfc->data |= RXH_L4_B_2_3;
3213 if (tuple_sets & HCLGE_S_PORT_BIT)
3214 nfc->data |= RXH_L4_B_0_1;
3215 if (tuple_sets & HCLGE_D_IP_BIT)
3216 nfc->data |= RXH_IP_DST;
3217 if (tuple_sets & HCLGE_S_IP_BIT)
3218 nfc->data |= RXH_IP_SRC;
3219
3220 return 0;
3221}
3222
46a3df9f
S
3223static int hclge_get_tc_size(struct hnae3_handle *handle)
3224{
3225 struct hclge_vport *vport = hclge_get_vport(handle);
3226 struct hclge_dev *hdev = vport->back;
3227
3228 return hdev->rss_size_max;
3229}
3230
77f255c1 3231int hclge_rss_init_hw(struct hclge_dev *hdev)
46a3df9f
S
3232{
3233 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3234 struct hclge_vport *vport = hdev->vport;
3235 u16 tc_offset[HCLGE_MAX_TC_NUM];
3236 u8 rss_key[HCLGE_RSS_KEY_SIZE];
3237 u16 tc_valid[HCLGE_MAX_TC_NUM];
3238 u16 tc_size[HCLGE_MAX_TC_NUM];
3239 u32 *rss_indir = NULL;
68ece54e 3240 u16 rss_size = 0, roundup_size;
46a3df9f
S
3241 const u8 *key;
3242 int i, ret, j;
3243
3244 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
3245 if (!rss_indir)
3246 return -ENOMEM;
3247
3248 /* Get default RSS key */
3249 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
3250
3251 /* Initialize RSS indirect table for each vport */
3252 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3253 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
3254 vport[j].rss_indirection_tbl[i] =
68ece54e
YL
3255 i % vport[j].alloc_rss_size;
3256
3257 /* vport 0 is for PF */
3258 if (j != 0)
3259 continue;
3260
3261 rss_size = vport[j].alloc_rss_size;
46a3df9f
S
3262 rss_indir[i] = vport[j].rss_indirection_tbl[i];
3263 }
3264 }
3265 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3266 if (ret)
3267 goto err;
3268
3269 key = rss_key;
3270 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3271 if (ret)
3272 goto err;
3273
3274 ret = hclge_set_rss_input_tuple(hdev);
3275 if (ret)
3276 goto err;
3277
68ece54e
YL
 3278	/* Each TC has the same queue size, and the tc_size set to hardware is
 3279	 * the log2 of the roundup power of two of rss_size; the actual queue
 3280	 * size is limited by the indirection table.
3281 */
3282 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3283 dev_err(&hdev->pdev->dev,
3284 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3285 rss_size);
81359617
CJ
3286 ret = -EINVAL;
3287 goto err;
68ece54e
YL
3288 }
3289
3290 roundup_size = roundup_pow_of_two(rss_size);
3291 roundup_size = ilog2(roundup_size);
3292
46a3df9f 3293 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
68ece54e 3294 tc_valid[i] = 0;
46a3df9f 3295
68ece54e
YL
3296 if (!(hdev->hw_tc_map & BIT(i)))
3297 continue;
3298
3299 tc_valid[i] = 1;
3300 tc_size[i] = roundup_size;
3301 tc_offset[i] = rss_size * i;
46a3df9f 3302 }
68ece54e 3303
46a3df9f
S
3304 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3305
3306err:
3307 kfree(rss_indir);
3308
3309 return ret;
3310}
3311
63d7e66f
SM
3312int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3313 int vector_id, bool en,
3314 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3315{
3316 struct hclge_dev *hdev = vport->back;
46a3df9f
S
3317 struct hnae3_ring_chain_node *node;
3318 struct hclge_desc desc;
63d7e66f
SM
3319 struct hclge_ctrl_vector_chain_cmd *req
3320 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3321 enum hclge_cmd_status status;
3322 enum hclge_opcode_type op;
3323 u16 tqp_type_and_id;
46a3df9f
S
3324 int i;
3325
63d7e66f
SM
3326 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3327 hclge_cmd_setup_basic_desc(&desc, op, false);
46a3df9f
S
3328 req->int_vector_id = vector_id;
3329
3330 i = 0;
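	/* Walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
	 * entries per command and sending each full batch; any remainder is
	 * sent after the loop.
	 */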
3331 for (node = ring_chain; node; node = node->next) {
63d7e66f
SM
3332 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3333 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3334 HCLGE_INT_TYPE_S,
46a3df9f 3335 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
63d7e66f
SM
3336 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3337 HCLGE_TQP_ID_S, node->tqp_index);
3338 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
46a3df9f
S
3339 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3340 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
63d7e66f 3341 req->vfid = vport->vport_id;
46a3df9f 3342
63d7e66f
SM
3343 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3344 if (status) {
46a3df9f
S
3345 dev_err(&hdev->pdev->dev,
3346 "Map TQP fail, status is %d.\n",
63d7e66f
SM
3347 status);
3348 return -EIO;
46a3df9f
S
3349 }
3350 i = 0;
3351
3352 hclge_cmd_setup_basic_desc(&desc,
63d7e66f 3353 op,
46a3df9f
S
3354 false);
3355 req->int_vector_id = vector_id;
3356 }
3357 }
3358
3359 if (i > 0) {
3360 req->int_cause_num = i;
63d7e66f
SM
3361 req->vfid = vport->vport_id;
3362 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3363 if (status) {
46a3df9f 3364 dev_err(&hdev->pdev->dev,
63d7e66f
SM
3365 "Map TQP fail, status is %d.\n", status);
3366 return -EIO;
46a3df9f
S
3367 }
3368 }
3369
3370 return 0;
3371}
3372
63d7e66f
SM
3373static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3374 int vector,
3375 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3376{
3377 struct hclge_vport *vport = hclge_get_vport(handle);
3378 struct hclge_dev *hdev = vport->back;
3379 int vector_id;
3380
3381 vector_id = hclge_get_vector_index(hdev, vector);
3382 if (vector_id < 0) {
3383 dev_err(&hdev->pdev->dev,
63d7e66f 3384 "Get vector index fail. vector_id =%d\n", vector_id);
46a3df9f
S
3385 return vector_id;
3386 }
3387
63d7e66f 3388 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
46a3df9f
S
3389}
3390
63d7e66f
SM
3391static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3392 int vector,
3393 struct hnae3_ring_chain_node *ring_chain)
46a3df9f
S
3394{
3395 struct hclge_vport *vport = hclge_get_vport(handle);
3396 struct hclge_dev *hdev = vport->back;
63d7e66f 3397 int vector_id, ret;
46a3df9f
S
3398
3399 vector_id = hclge_get_vector_index(hdev, vector);
3400 if (vector_id < 0) {
3401 dev_err(&handle->pdev->dev,
3402 "Get vector index fail. ret =%d\n", vector_id);
3403 return vector_id;
3404 }
3405
63d7e66f
SM
3406 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3407 if (ret) {
3408 dev_err(&handle->pdev->dev,
3409 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3410 vector_id,
3411 ret);
3412 return ret;
46a3df9f
S
3413 }
3414
63d7e66f
SM
3415 /* Free this MSIX or MSI vector */
3416 hclge_free_vector(hdev, vector_id);
46a3df9f
S
3417
3418 return 0;
3419}
3420
3421int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3422 struct hclge_promisc_param *param)
3423{
d44f9b63 3424 struct hclge_promisc_cfg_cmd *req;
46a3df9f
S
3425 struct hclge_desc desc;
3426 int ret;
3427
3428 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3429
d44f9b63 3430 req = (struct hclge_promisc_cfg_cmd *)desc.data;
46a3df9f
S
3431 req->vf_id = param->vf_id;
3432 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
3433
3434 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3435 if (ret) {
3436 dev_err(&hdev->pdev->dev,
3437 "Set promisc mode fail, status is %d.\n", ret);
3438 return ret;
3439 }
3440 return 0;
3441}
3442
3443void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3444 bool en_mc, bool en_bc, int vport_id)
3445{
3446 if (!param)
3447 return;
3448
3449 memset(param, 0, sizeof(struct hclge_promisc_param));
3450 if (en_uc)
3451 param->enable = HCLGE_PROMISC_EN_UC;
3452 if (en_mc)
3453 param->enable |= HCLGE_PROMISC_EN_MC;
3454 if (en_bc)
3455 param->enable |= HCLGE_PROMISC_EN_BC;
3456 param->vf_id = vport_id;
3457}
3458
3459static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
3460{
3461 struct hclge_vport *vport = hclge_get_vport(handle);
3462 struct hclge_dev *hdev = vport->back;
3463 struct hclge_promisc_param param;
3464
3465 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
3466 hclge_cmd_set_promisc_mode(hdev, &param);
3467}
3468
3469static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3470{
3471 struct hclge_desc desc;
d44f9b63
YL
3472 struct hclge_config_mac_mode_cmd *req =
3473 (struct hclge_config_mac_mode_cmd *)desc.data;
a90bb9a5 3474 u32 loop_en = 0;
46a3df9f
S
3475 int ret;
3476
3477 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
a90bb9a5
YL
3478 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3479 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3480 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3481 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3482 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3483 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3484 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3485 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3486 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3487 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3488 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3489 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3490 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3491 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3492 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
46a3df9f
S
3493
3494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3495 if (ret)
3496 dev_err(&hdev->pdev->dev,
3497 "mac enable fail, ret =%d.\n", ret);
3498}
3499
c39c4d98
YL
3500static int hclge_set_loopback(struct hnae3_handle *handle,
3501 enum hnae3_loop loop_mode, bool en)
3502{
3503 struct hclge_vport *vport = hclge_get_vport(handle);
3504 struct hclge_config_mac_mode_cmd *req;
3505 struct hclge_dev *hdev = vport->back;
3506 struct hclge_desc desc;
3507 u32 loop_en;
3508 int ret;
3509
3510 switch (loop_mode) {
3511 case HNAE3_MAC_INTER_LOOP_MAC:
3512 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3513 /* 1 Read out the MAC mode config at first */
3514 hclge_cmd_setup_basic_desc(&desc,
3515 HCLGE_OPC_CONFIG_MAC_MODE,
3516 true);
3517 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3518 if (ret) {
3519 dev_err(&hdev->pdev->dev,
3520 "mac loopback get fail, ret =%d.\n",
3521 ret);
3522 return ret;
3523 }
3524
3525 /* 2 Then setup the loopback flag */
3526 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3527 if (en)
3528 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
3529 else
3530 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3531
3532 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3533
3534 /* 3 Config mac work mode with loopback flag
3535 * and its original configure parameters
3536 */
3537 hclge_cmd_reuse_desc(&desc, false);
3538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3539 if (ret)
3540 dev_err(&hdev->pdev->dev,
3541 "mac loopback set fail, ret =%d.\n", ret);
3542 break;
3543 default:
3544 ret = -ENOTSUPP;
3545 dev_err(&hdev->pdev->dev,
3546 "loop_mode %d is not supported\n", loop_mode);
3547 break;
3548 }
3549
3550 return ret;
3551}
3552
46a3df9f
S
3553static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3554 int stream_id, bool enable)
3555{
3556 struct hclge_desc desc;
d44f9b63
YL
3557 struct hclge_cfg_com_tqp_queue_cmd *req =
3558 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
46a3df9f
S
3559 int ret;
3560
3561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3562 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3563 req->stream_id = cpu_to_le16(stream_id);
3564 req->enable |= enable << HCLGE_TQP_ENABLE_B;
3565
3566 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3567 if (ret)
3568 dev_err(&hdev->pdev->dev,
3569 "Tqp enable fail, status =%d.\n", ret);
3570 return ret;
3571}
3572
3573static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3574{
3575 struct hclge_vport *vport = hclge_get_vport(handle);
3576 struct hnae3_queue *queue;
3577 struct hclge_tqp *tqp;
3578 int i;
3579
3580 for (i = 0; i < vport->alloc_tqps; i++) {
3581 queue = handle->kinfo.tqp[i];
3582 tqp = container_of(queue, struct hclge_tqp, q);
3583 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3584 }
3585}
3586
3587static int hclge_ae_start(struct hnae3_handle *handle)
3588{
3589 struct hclge_vport *vport = hclge_get_vport(handle);
3590 struct hclge_dev *hdev = vport->back;
3591 int i, queue_id, ret;
3592
3593 for (i = 0; i < vport->alloc_tqps; i++) {
3594 /* todo clear interrupt */
3595 /* ring enable */
3596 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3597 if (queue_id < 0) {
3598 dev_warn(&hdev->pdev->dev,
3599 "Get invalid queue id, ignore it\n");
3600 continue;
3601 }
3602
3603 hclge_tqp_enable(hdev, queue_id, 0, true);
3604 }
3605 /* mac enable */
3606 hclge_cfg_mac_mode(hdev, true);
3607 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
d039ef68 3608 mod_timer(&hdev->service_timer, jiffies + HZ);
46a3df9f
S
3609
3610 ret = hclge_mac_start_phy(hdev);
3611 if (ret)
3612 return ret;
3613
3614 /* reset tqp stats */
3615 hclge_reset_tqp_stats(handle);
3616
3617 return 0;
3618}
3619
3620static void hclge_ae_stop(struct hnae3_handle *handle)
3621{
3622 struct hclge_vport *vport = hclge_get_vport(handle);
3623 struct hclge_dev *hdev = vport->back;
3624 int i, queue_id;
3625
3626 for (i = 0; i < vport->alloc_tqps; i++) {
3627 /* Ring disable */
3628 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3629 if (queue_id < 0) {
3630 dev_warn(&hdev->pdev->dev,
3631 "Get invalid queue id, ignore it\n");
3632 continue;
3633 }
3634
3635 hclge_tqp_enable(hdev, queue_id, 0, false);
3636 }
3637 /* Mac disable */
3638 hclge_cfg_mac_mode(hdev, false);
3639
3640 hclge_mac_stop_phy(hdev);
3641
3642 /* reset tqp stats */
3643 hclge_reset_tqp_stats(handle);
3644}
3645
3646static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3647 u16 cmdq_resp, u8 resp_code,
3648 enum hclge_mac_vlan_tbl_opcode op)
3649{
3650 struct hclge_dev *hdev = vport->back;
3651 int return_status = -EIO;
3652
3653 if (cmdq_resp) {
3654 dev_err(&hdev->pdev->dev,
3655 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3656 cmdq_resp);
3657 return -EIO;
3658 }
3659
3660 if (op == HCLGE_MAC_VLAN_ADD) {
3661 if ((!resp_code) || (resp_code == 1)) {
3662 return_status = 0;
3663 } else if (resp_code == 2) {
3664 return_status = -EIO;
3665 dev_err(&hdev->pdev->dev,
3666 "add mac addr failed for uc_overflow.\n");
3667 } else if (resp_code == 3) {
3668 return_status = -EIO;
3669 dev_err(&hdev->pdev->dev,
3670 "add mac addr failed for mc_overflow.\n");
3671 } else {
3672 dev_err(&hdev->pdev->dev,
3673 "add mac addr failed for undefined, code=%d.\n",
3674 resp_code);
3675 }
3676 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3677 if (!resp_code) {
3678 return_status = 0;
3679 } else if (resp_code == 1) {
3680 return_status = -EIO;
3681 dev_dbg(&hdev->pdev->dev,
3682 "remove mac addr failed for miss.\n");
3683 } else {
3684 dev_err(&hdev->pdev->dev,
3685 "remove mac addr failed for undefined, code=%d.\n",
3686 resp_code);
3687 }
3688 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3689 if (!resp_code) {
3690 return_status = 0;
3691 } else if (resp_code == 1) {
3692 return_status = -EIO;
3693 dev_dbg(&hdev->pdev->dev,
3694 "lookup mac addr failed for miss.\n");
3695 } else {
3696 dev_err(&hdev->pdev->dev,
3697 "lookup mac addr failed for undefined, code=%d.\n",
3698 resp_code);
3699 }
3700 } else {
3701 return_status = -EIO;
3702 dev_err(&hdev->pdev->dev,
3703 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3704 op);
3705 }
3706
3707 return return_status;
3708}
3709
3710static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3711{
3712 int word_num;
3713 int bit_num;
3714
3715 if (vfid > 255 || vfid < 0)
3716 return -EIO;
3717
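	/* Each descriptor holds six 32-bit function-id words, so desc[1]
	 * covers vfid 0-191 and desc[2] covers vfid 192-255; the word and
	 * bit within a descriptor are vfid / 32 and vfid % 32.
	 */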
3718 if (vfid >= 0 && vfid <= 191) {
3719 word_num = vfid / 32;
3720 bit_num = vfid % 32;
3721 if (clr)
a90bb9a5 3722 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 3723 else
a90bb9a5 3724 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
3725 } else {
3726 word_num = (vfid - 192) / 32;
3727 bit_num = vfid % 32;
3728 if (clr)
a90bb9a5 3729 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
46a3df9f 3730 else
a90bb9a5 3731 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
46a3df9f
S
3732 }
3733
3734 return 0;
3735}
3736
3737static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3738{
3739#define HCLGE_DESC_NUMBER 3
3740#define HCLGE_FUNC_NUMBER_PER_DESC 6
3741 int i, j;
3742
3743 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3744 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3745 if (desc[i].data[j])
3746 return false;
3747
3748 return true;
3749}
3750
d44f9b63 3751static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
46a3df9f
S
3752 const u8 *addr)
3753{
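	/* Pack the 6-byte MAC into the table entry: bytes 0-3 form the
	 * 32-bit high word (byte 0 in the least significant position) and
	 * bytes 4-5 form the 16-bit low word.
	 */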
3754 const unsigned char *mac_addr = addr;
3755 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3756 (mac_addr[0]) | (mac_addr[1] << 8);
3757 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3758
3759 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3760 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3761}
3762
1db9b1bf
YL
3763static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3764 const u8 *addr)
46a3df9f
S
3765{
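	/* The MTA index comes from the leading bits of the destination MAC;
	 * the configured mta_mac_sel_type decides how far they are shifted,
	 * leaving a 12-bit table index.
	 */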
3766 u16 high_val = addr[1] | (addr[0] << 8);
3767 struct hclge_dev *hdev = vport->back;
3768 u32 rsh = 4 - hdev->mta_mac_sel_type;
3769 u16 ret_val = (high_val >> rsh) & 0xfff;
3770
3771 return ret_val;
3772}
3773
3774static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3775 enum hclge_mta_dmac_sel_type mta_mac_sel,
3776 bool enable)
3777{
d44f9b63 3778 struct hclge_mta_filter_mode_cmd *req;
46a3df9f
S
3779 struct hclge_desc desc;
3780 int ret;
3781
d44f9b63 3782 req = (struct hclge_mta_filter_mode_cmd *)desc.data;
46a3df9f
S
3783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3784
3785 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3786 enable);
3787 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3788 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3789
3790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3791 if (ret) {
3792 dev_err(&hdev->pdev->dev,
3793 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3794 ret);
3795 return ret;
3796 }
3797
3798 return 0;
3799}
3800
3801int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3802 u8 func_id,
3803 bool enable)
3804{
d44f9b63 3805 struct hclge_cfg_func_mta_filter_cmd *req;
46a3df9f
S
3806 struct hclge_desc desc;
3807 int ret;
3808
d44f9b63 3809 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
46a3df9f
S
3810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3811
3812 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3813 enable);
3814 req->function_id = func_id;
3815
3816 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3817 if (ret) {
3818 dev_err(&hdev->pdev->dev,
3819 "Config func_id enable failed for cmd_send, ret =%d.\n",
3820 ret);
3821 return ret;
3822 }
3823
3824 return 0;
3825}
3826
3827static int hclge_set_mta_table_item(struct hclge_vport *vport,
3828 u16 idx,
3829 bool enable)
3830{
3831 struct hclge_dev *hdev = vport->back;
d44f9b63 3832 struct hclge_cfg_func_mta_item_cmd *req;
46a3df9f 3833 struct hclge_desc desc;
a90bb9a5 3834 u16 item_idx = 0;
46a3df9f
S
3835 int ret;
3836
d44f9b63 3837 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
46a3df9f
S
3838 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3839 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3840
a90bb9a5 3841 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
46a3df9f 3842 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
a90bb9a5 3843 req->item_idx = cpu_to_le16(item_idx);
46a3df9f
S
3844
3845 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3846 if (ret) {
3847 dev_err(&hdev->pdev->dev,
3848 "Config mta table item failed for cmd_send, ret =%d.\n",
3849 ret);
3850 return ret;
3851 }
3852
3853 return 0;
3854}
3855
3856static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 3857 struct hclge_mac_vlan_tbl_entry_cmd *req)
46a3df9f
S
3858{
3859 struct hclge_dev *hdev = vport->back;
3860 struct hclge_desc desc;
3861 u8 resp_code;
a90bb9a5 3862 u16 retval;
46a3df9f
S
3863 int ret;
3864
3865 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3866
d44f9b63 3867 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
3868
3869 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3870 if (ret) {
3871 dev_err(&hdev->pdev->dev,
3872 "del mac addr failed for cmd_send, ret =%d.\n",
3873 ret);
3874 return ret;
3875 }
a90bb9a5
YL
3876 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3877 retval = le16_to_cpu(desc.retval);
46a3df9f 3878
a90bb9a5 3879 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
3880 HCLGE_MAC_VLAN_REMOVE);
3881}
3882
3883static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 3884 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
3885 struct hclge_desc *desc,
3886 bool is_mc)
3887{
3888 struct hclge_dev *hdev = vport->back;
3889 u8 resp_code;
a90bb9a5 3890 u16 retval;
46a3df9f
S
3891 int ret;
3892
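	/* A multicast lookup spans three command descriptors chained with
	 * the NEXT flag; a unicast lookup fits in a single descriptor.
	 */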
3893 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3894 if (is_mc) {
3895 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3896 memcpy(desc[0].data,
3897 req,
d44f9b63 3898 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
3899 hclge_cmd_setup_basic_desc(&desc[1],
3900 HCLGE_OPC_MAC_VLAN_ADD,
3901 true);
3902 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3903 hclge_cmd_setup_basic_desc(&desc[2],
3904 HCLGE_OPC_MAC_VLAN_ADD,
3905 true);
3906 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3907 } else {
3908 memcpy(desc[0].data,
3909 req,
d44f9b63 3910 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f
S
3911 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3912 }
3913 if (ret) {
3914 dev_err(&hdev->pdev->dev,
3915 "lookup mac addr failed for cmd_send, ret =%d.\n",
3916 ret);
3917 return ret;
3918 }
a90bb9a5
YL
3919 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
3920 retval = le16_to_cpu(desc[0].retval);
46a3df9f 3921
a90bb9a5 3922 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
46a3df9f
S
3923 HCLGE_MAC_VLAN_LKUP);
3924}
3925
3926static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
d44f9b63 3927 struct hclge_mac_vlan_tbl_entry_cmd *req,
46a3df9f
S
3928 struct hclge_desc *mc_desc)
3929{
3930 struct hclge_dev *hdev = vport->back;
3931 int cfg_status;
3932 u8 resp_code;
a90bb9a5 3933 u16 retval;
46a3df9f
S
3934 int ret;
3935
3936 if (!mc_desc) {
3937 struct hclge_desc desc;
3938
3939 hclge_cmd_setup_basic_desc(&desc,
3940 HCLGE_OPC_MAC_VLAN_ADD,
3941 false);
d44f9b63
YL
3942 memcpy(desc.data, req,
3943 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 3944 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
a90bb9a5
YL
3945 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3946 retval = le16_to_cpu(desc.retval);
3947
3948 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
3949 resp_code,
3950 HCLGE_MAC_VLAN_ADD);
3951 } else {
c3b6f755 3952 hclge_cmd_reuse_desc(&mc_desc[0], false);
46a3df9f 3953 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 3954 hclge_cmd_reuse_desc(&mc_desc[1], false);
46a3df9f 3955 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
c3b6f755 3956 hclge_cmd_reuse_desc(&mc_desc[2], false);
46a3df9f
S
3957 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3958 memcpy(mc_desc[0].data, req,
d44f9b63 3959 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
46a3df9f 3960 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
a90bb9a5
YL
3961 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
3962 retval = le16_to_cpu(mc_desc[0].retval);
3963
3964 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
46a3df9f
S
3965 resp_code,
3966 HCLGE_MAC_VLAN_ADD);
3967 }
3968
3969 if (ret) {
3970 dev_err(&hdev->pdev->dev,
3971 "add mac addr failed for cmd_send, ret =%d.\n",
3972 ret);
3973 return ret;
3974 }
3975
3976 return cfg_status;
3977}
3978
3979static int hclge_add_uc_addr(struct hnae3_handle *handle,
3980 const unsigned char *addr)
3981{
3982 struct hclge_vport *vport = hclge_get_vport(handle);
3983
3984 return hclge_add_uc_addr_common(vport, addr);
3985}
3986
3987int hclge_add_uc_addr_common(struct hclge_vport *vport,
3988 const unsigned char *addr)
3989{
3990 struct hclge_dev *hdev = vport->back;
d44f9b63 3991 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f 3992 enum hclge_cmd_status status;
a90bb9a5 3993 u16 egress_port = 0;
46a3df9f
S
3994
3995 /* mac addr check */
3996 if (is_zero_ether_addr(addr) ||
3997 is_broadcast_ether_addr(addr) ||
3998 is_multicast_ether_addr(addr)) {
3999 dev_err(&hdev->pdev->dev,
4000 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
4001 addr,
4002 is_zero_ether_addr(addr),
4003 is_broadcast_ether_addr(addr),
4004 is_multicast_ether_addr(addr));
4005 return -EINVAL;
4006 }
4007
4008 memset(&req, 0, sizeof(req));
4009 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4010 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4011 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
4012 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
a90bb9a5
YL
4013
4014 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
4015 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
4016 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
46a3df9f 4017 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
a90bb9a5 4018 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
46a3df9f 4019 HCLGE_MAC_EPORT_PFID_S, 0);
a90bb9a5
YL
4020
4021 req.egress_port = cpu_to_le16(egress_port);
46a3df9f
S
4022
4023 hclge_prepare_mac_addr(&req, addr);
4024
4025 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
4026
4027 return status;
4028}
4029
4030static int hclge_rm_uc_addr(struct hnae3_handle *handle,
4031 const unsigned char *addr)
4032{
4033 struct hclge_vport *vport = hclge_get_vport(handle);
4034
4035 return hclge_rm_uc_addr_common(vport, addr);
4036}
4037
4038int hclge_rm_uc_addr_common(struct hclge_vport *vport,
4039 const unsigned char *addr)
4040{
4041 struct hclge_dev *hdev = vport->back;
d44f9b63 4042 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
4043 enum hclge_cmd_status status;
4044
4045 /* mac addr check */
4046 if (is_zero_ether_addr(addr) ||
4047 is_broadcast_ether_addr(addr) ||
4048 is_multicast_ether_addr(addr)) {
4049 dev_dbg(&hdev->pdev->dev,
4050 "Remove mac err! invalid mac:%pM.\n",
4051 addr);
4052 return -EINVAL;
4053 }
4054
4055 memset(&req, 0, sizeof(req));
4056 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4057 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4058 hclge_prepare_mac_addr(&req, addr);
4059 status = hclge_remove_mac_vlan_tbl(vport, &req);
4060
4061 return status;
4062}
4063
4064static int hclge_add_mc_addr(struct hnae3_handle *handle,
4065 const unsigned char *addr)
4066{
4067 struct hclge_vport *vport = hclge_get_vport(handle);
4068
4069 return hclge_add_mc_addr_common(vport, addr);
4070}
4071
4072int hclge_add_mc_addr_common(struct hclge_vport *vport,
4073 const unsigned char *addr)
4074{
4075 struct hclge_dev *hdev = vport->back;
d44f9b63 4076 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
4077 struct hclge_desc desc[3];
4078 u16 tbl_idx;
4079 int status;
4080
4081 /* mac addr check */
4082 if (!is_multicast_ether_addr(addr)) {
4083 dev_err(&hdev->pdev->dev,
4084 "Add mc mac err! invalid mac:%pM.\n",
4085 addr);
4086 return -EINVAL;
4087 }
4088 memset(&req, 0, sizeof(req));
4089 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4090 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4091 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4092 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4093 hclge_prepare_mac_addr(&req, addr);
4094 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4095 if (!status) {
4096 /* This mac addr exists, update VFID for it */
4097 hclge_update_desc_vfid(desc, vport->vport_id, false);
4098 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4099 } else {
4100 /* This mac addr does not exist, add a new entry for it */
4101 memset(desc[0].data, 0, sizeof(desc[0].data));
4102 memset(desc[1].data, 0, sizeof(desc[0].data));
4103 memset(desc[2].data, 0, sizeof(desc[0].data));
4104 hclge_update_desc_vfid(desc, vport->vport_id, false);
4105 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4106 }
4107
4108 /* Set MTA table for this MAC address */
4109 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4110 status = hclge_set_mta_table_item(vport, tbl_idx, true);
4111
4112 return status;
4113}
4114
4115static int hclge_rm_mc_addr(struct hnae3_handle *handle,
4116 const unsigned char *addr)
4117{
4118 struct hclge_vport *vport = hclge_get_vport(handle);
4119
4120 return hclge_rm_mc_addr_common(vport, addr);
4121}
4122
4123int hclge_rm_mc_addr_common(struct hclge_vport *vport,
4124 const unsigned char *addr)
4125{
4126 struct hclge_dev *hdev = vport->back;
d44f9b63 4127 struct hclge_mac_vlan_tbl_entry_cmd req;
46a3df9f
S
4128 enum hclge_cmd_status status;
4129 struct hclge_desc desc[3];
4130 u16 tbl_idx;
4131
4132 /* mac addr check */
4133 if (!is_multicast_ether_addr(addr)) {
4134 dev_dbg(&hdev->pdev->dev,
4135 "Remove mc mac err! invalid mac:%pM.\n",
4136 addr);
4137 return -EINVAL;
4138 }
4139
4140 memset(&req, 0, sizeof(req));
4141 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4142 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4143 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4144 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4145 hclge_prepare_mac_addr(&req, addr);
4146 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4147 if (!status) {
4148 /* This mac addr exists, remove this handle's VFID for it */
4149 hclge_update_desc_vfid(desc, vport->vport_id, true);
4150
4151 if (hclge_is_all_function_id_zero(desc))
4152 /* All the vfids are zero, so delete this entry */
4153 status = hclge_remove_mac_vlan_tbl(vport, &req);
4154 else
4155 /* Not all the vfids are zero, just update the vfid */
4156 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4157
4158 } else {
4159 /* This mac addr does not exist, can't delete it */
4160 dev_err(&hdev->pdev->dev,
d7629e74 4161 "Rm multicast mac addr failed, ret = %d.\n",
46a3df9f
S
4162 status);
4163 return -EIO;
4164 }
4165
4166 /* Clear the MTA table entry for this MAC address */
4167 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4168 status = hclge_set_mta_table_item(vport, tbl_idx, false);
4169
4170 return status;
4171}
4172
4173static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4174{
4175 struct hclge_vport *vport = hclge_get_vport(handle);
4176 struct hclge_dev *hdev = vport->back;
4177
4178 ether_addr_copy(p, hdev->hw.mac.mac_addr);
4179}
4180
4181static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
4182{
4183 const unsigned char *new_addr = (const unsigned char *)p;
4184 struct hclge_vport *vport = hclge_get_vport(handle);
4185 struct hclge_dev *hdev = vport->back;
4186
4187 /* mac addr check */
4188 if (is_zero_ether_addr(new_addr) ||
4189 is_broadcast_ether_addr(new_addr) ||
4190 is_multicast_ether_addr(new_addr)) {
4191 dev_err(&hdev->pdev->dev,
4192 "Change uc mac err! invalid mac:%p.\n",
4193 new_addr);
4194 return -EINVAL;
4195 }
4196
4197 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
4198
4199 if (!hclge_add_uc_addr(handle, new_addr)) {
4200 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4201 return 0;
4202 }
4203
4204 return -EIO;
4205}
4206
4207static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4208 bool filter_en)
4209{
d44f9b63 4210 struct hclge_vlan_filter_ctrl_cmd *req;
46a3df9f
S
4211 struct hclge_desc desc;
4212 int ret;
4213
4214 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4215
d44f9b63 4216 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46a3df9f
S
4217 req->vlan_type = vlan_type;
4218 req->vlan_fe = filter_en;
4219
4220 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4221 if (ret) {
4222 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
4223 ret);
4224 return ret;
4225 }
4226
4227 return 0;
4228}
4229
4230int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4231 bool is_kill, u16 vlan, u8 qos, __be16 proto)
4232{
4233#define HCLGE_MAX_VF_BYTES 16
d44f9b63
YL
4234 struct hclge_vlan_filter_vf_cfg_cmd *req0;
4235 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46a3df9f
S
4236 struct hclge_desc desc[2];
4237 u8 vf_byte_val;
4238 u8 vf_byte_off;
4239 int ret;
4240
4241 hclge_cmd_setup_basic_desc(&desc[0],
4242 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4243 hclge_cmd_setup_basic_desc(&desc[1],
4244 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4245
4246 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4247
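	/* The VF bitmap is 16 bytes per descriptor, so VFs 0-127 land in the
	 * first descriptor and higher VF ids spill over into the second.
	 */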
4248 vf_byte_off = vfid / 8;
4249 vf_byte_val = 1 << (vfid % 8);
4250
d44f9b63
YL
4251 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
4252 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
46a3df9f 4253
a90bb9a5 4254 req0->vlan_id = cpu_to_le16(vlan);
46a3df9f
S
4255 req0->vlan_cfg = is_kill;
4256
4257 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
4258 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
4259 else
4260 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
4261
4262 ret = hclge_cmd_send(&hdev->hw, desc, 2);
4263 if (ret) {
4264 dev_err(&hdev->pdev->dev,
4265 "Send vf vlan command fail, ret =%d.\n",
4266 ret);
4267 return ret;
4268 }
4269
4270 if (!is_kill) {
4271 if (!req0->resp_code || req0->resp_code == 1)
4272 return 0;
4273
4274 dev_err(&hdev->pdev->dev,
4275 "Add vf vlan filter fail, ret =%d.\n",
4276 req0->resp_code);
4277 } else {
4278 if (!req0->resp_code)
4279 return 0;
4280
4281 dev_err(&hdev->pdev->dev,
4282 "Kill vf vlan filter fail, ret =%d.\n",
4283 req0->resp_code);
4284 }
4285
4286 return -EIO;
4287}
4288
4289static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
4290 __be16 proto, u16 vlan_id,
4291 bool is_kill)
4292{
4293 struct hclge_vport *vport = hclge_get_vport(handle);
4294 struct hclge_dev *hdev = vport->back;
d44f9b63 4295 struct hclge_vlan_filter_pf_cfg_cmd *req;
46a3df9f
S
4296 struct hclge_desc desc;
4297 u8 vlan_offset_byte_val;
4298 u8 vlan_offset_byte;
4299 u8 vlan_offset_160;
4300 int ret;
4301
4302 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
4303
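	/* The PF VLAN table is written 160 VLAN ids at a time: select the
	 * 160-id block, then set the matching bit in its 20-byte bitmap.
	 */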
4304 vlan_offset_160 = vlan_id / 160;
4305 vlan_offset_byte = (vlan_id % 160) / 8;
4306 vlan_offset_byte_val = 1 << (vlan_id % 8);
4307
d44f9b63 4308 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
46a3df9f
S
4309 req->vlan_offset = vlan_offset_160;
4310 req->vlan_cfg = is_kill;
4311 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4312
4313 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4314 if (ret) {
4315 dev_err(&hdev->pdev->dev,
4316 "port vlan command, send fail, ret =%d.\n",
4317 ret);
4318 return ret;
4319 }
4320
4321 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
4322 if (ret) {
4323 dev_err(&hdev->pdev->dev,
4324 "Set pf vlan filter config fail, ret =%d.\n",
4325 ret);
4326 return -EIO;
4327 }
4328
4329 return 0;
4330}
4331
4332static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4333 u16 vlan, u8 qos, __be16 proto)
4334{
4335 struct hclge_vport *vport = hclge_get_vport(handle);
4336 struct hclge_dev *hdev = vport->back;
4337
4338 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4339 return -EINVAL;
4340 if (proto != htons(ETH_P_8021Q))
4341 return -EPROTONOSUPPORT;
4342
4343 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
4344}
4345
4346static int hclge_init_vlan_config(struct hclge_dev *hdev)
4347{
4348#define HCLGE_VLAN_TYPE_VF_TABLE 0
4349#define HCLGE_VLAN_TYPE_PORT_TABLE 1
5e43aef8 4350 struct hnae3_handle *handle;
46a3df9f
S
4351 int ret;
4352
4353 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
4354 true);
4355 if (ret)
4356 return ret;
4357
4358 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
4359 true);
5e43aef8
L
4360 if (ret)
4361 return ret;
46a3df9f 4362
5e43aef8
L
4363 handle = &hdev->vport[0].nic;
4364 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
4365}
4366
4367static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4368{
4369 struct hclge_vport *vport = hclge_get_vport(handle);
d44f9b63 4370 struct hclge_config_max_frm_size_cmd *req;
46a3df9f
S
4371 struct hclge_dev *hdev = vport->back;
4372 struct hclge_desc desc;
4373 int ret;
4374
4375 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
4376 return -EINVAL;
4377
4378 hdev->mps = new_mtu;
4379 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4380
d44f9b63 4381 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
46a3df9f
S
4382 req->max_frm_size = cpu_to_le16(new_mtu);
4383
4384 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4385 if (ret) {
4386 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
4387 return ret;
4388 }
4389
4390 return 0;
4391}
4392
4393static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
4394 bool enable)
4395{
d44f9b63 4396 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
4397 struct hclge_desc desc;
4398 int ret;
4399
4400 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
4401
d44f9b63 4402 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
4403 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4404 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
4405
4406 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4407 if (ret) {
4408 dev_err(&hdev->pdev->dev,
4409 "Send tqp reset cmd error, status =%d\n", ret);
4410 return ret;
4411 }
4412
4413 return 0;
4414}
4415
4416static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
4417{
d44f9b63 4418 struct hclge_reset_tqp_queue_cmd *req;
46a3df9f
S
4419 struct hclge_desc desc;
4420 int ret;
4421
4422 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
4423
d44f9b63 4424 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
46a3df9f
S
4425 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4426
4427 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4428 if (ret) {
4429 dev_err(&hdev->pdev->dev,
4430 "Get reset status error, status =%d\n", ret);
4431 return ret;
4432 }
4433
4434 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
4435}
4436
63d7e66f 4437void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
46a3df9f
S
4438{
4439 struct hclge_vport *vport = hclge_get_vport(handle);
4440 struct hclge_dev *hdev = vport->back;
4441 int reset_try_times = 0;
4442 int reset_status;
4443 int ret;
4444
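	/* Reset sequence: disable the queue, assert the per-TQP soft reset,
	 * poll until hardware reports ready, then deassert the reset.
	 */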
4445 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
4446 if (ret) {
4447 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
4448 return;
4449 }
4450
4451 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
4452 if (ret) {
4453 dev_warn(&hdev->pdev->dev,
4454 "Send reset tqp cmd fail, ret = %d\n", ret);
4455 return;
4456 }
4457
4458 reset_try_times = 0;
4459 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
4460 /* Wait for tqp hw reset */
4461 msleep(20);
4462 reset_status = hclge_get_reset_status(hdev, queue_id);
4463 if (reset_status)
4464 break;
4465 }
4466
4467 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
4468 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
4469 return;
4470 }
4471
4472 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
4473 if (ret) {
4474 dev_warn(&hdev->pdev->dev,
4475 "Deassert the soft reset fail, ret = %d\n", ret);
4476 return;
4477 }
4478}
4479
4480static u32 hclge_get_fw_version(struct hnae3_handle *handle)
4481{
4482 struct hclge_vport *vport = hclge_get_vport(handle);
4483 struct hclge_dev *hdev = vport->back;
4484
4485 return hdev->fw_version;
4486}
4487
4488static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
4489 u32 *rx_en, u32 *tx_en)
4490{
4491 struct hclge_vport *vport = hclge_get_vport(handle);
4492 struct hclge_dev *hdev = vport->back;
4493
4494 *auto_neg = hclge_get_autoneg(handle);
4495
4496 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4497 *rx_en = 0;
4498 *tx_en = 0;
4499 return;
4500 }
4501
4502 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
4503 *rx_en = 1;
4504 *tx_en = 0;
4505 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
4506 *tx_en = 1;
4507 *rx_en = 0;
4508 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
4509 *rx_en = 1;
4510 *tx_en = 1;
4511 } else {
4512 *rx_en = 0;
4513 *tx_en = 0;
4514 }
4515}
4516
4517static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
4518 u8 *auto_neg, u32 *speed, u8 *duplex)
4519{
4520 struct hclge_vport *vport = hclge_get_vport(handle);
4521 struct hclge_dev *hdev = vport->back;
4522
4523 if (speed)
4524 *speed = hdev->hw.mac.speed;
4525 if (duplex)
4526 *duplex = hdev->hw.mac.duplex;
4527 if (auto_neg)
4528 *auto_neg = hdev->hw.mac.autoneg;
4529}
4530
4531static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
4532{
4533 struct hclge_vport *vport = hclge_get_vport(handle);
4534 struct hclge_dev *hdev = vport->back;
4535
4536 if (media_type)
4537 *media_type = hdev->hw.mac.media_type;
4538}
4539
4540static void hclge_get_mdix_mode(struct hnae3_handle *handle,
4541 u8 *tp_mdix_ctrl, u8 *tp_mdix)
4542{
4543 struct hclge_vport *vport = hclge_get_vport(handle);
4544 struct hclge_dev *hdev = vport->back;
4545 struct phy_device *phydev = hdev->hw.mac.phydev;
4546 int mdix_ctrl, mdix, retval, is_resolved;
4547
4548 if (!phydev) {
4549 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4550 *tp_mdix = ETH_TP_MDI_INVALID;
4551 return;
4552 }
4553
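	/* The MDI-X control/status registers sit on a separate PHY page, so
	 * switch pages for the reads and restore the copper page afterwards.
	 */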
4554 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
4555
4556 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
4557 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
4558 HCLGE_PHY_MDIX_CTRL_S);
4559
4560 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
4561 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
4562 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
4563
4564 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
4565
4566 switch (mdix_ctrl) {
4567 case 0x0:
4568 *tp_mdix_ctrl = ETH_TP_MDI;
4569 break;
4570 case 0x1:
4571 *tp_mdix_ctrl = ETH_TP_MDI_X;
4572 break;
4573 case 0x3:
4574 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
4575 break;
4576 default:
4577 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4578 break;
4579 }
4580
4581 if (!is_resolved)
4582 *tp_mdix = ETH_TP_MDI_INVALID;
4583 else if (mdix)
4584 *tp_mdix = ETH_TP_MDI_X;
4585 else
4586 *tp_mdix = ETH_TP_MDI;
4587}
4588
4589static int hclge_init_client_instance(struct hnae3_client *client,
4590 struct hnae3_ae_dev *ae_dev)
4591{
4592 struct hclge_dev *hdev = ae_dev->priv;
4593 struct hclge_vport *vport;
4594 int i, ret;
4595
4596 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4597 vport = &hdev->vport[i];
4598
4599 switch (client->type) {
4600 case HNAE3_CLIENT_KNIC:
4601
4602 hdev->nic_client = client;
4603 vport->nic.client = client;
4604 ret = client->ops->init_instance(&vport->nic);
4605 if (ret)
4606 goto err;
4607
4608 if (hdev->roce_client &&
e92a0843 4609 hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
4610 struct hnae3_client *rc = hdev->roce_client;
4611
4612 ret = hclge_init_roce_base_info(vport);
4613 if (ret)
4614 goto err;
4615
4616 ret = rc->ops->init_instance(&vport->roce);
4617 if (ret)
4618 goto err;
4619 }
4620
4621 break;
4622 case HNAE3_CLIENT_UNIC:
4623 hdev->nic_client = client;
4624 vport->nic.client = client;
4625
4626 ret = client->ops->init_instance(&vport->nic);
4627 if (ret)
4628 goto err;
4629
4630 break;
4631 case HNAE3_CLIENT_ROCE:
e92a0843 4632 if (hnae3_dev_roce_supported(hdev)) {
46a3df9f
S
4633 hdev->roce_client = client;
4634 vport->roce.client = client;
4635 }
4636
3a46f34d 4637 if (hdev->roce_client && hdev->nic_client) {
46a3df9f
S
4638 ret = hclge_init_roce_base_info(vport);
4639 if (ret)
4640 goto err;
4641
4642 ret = client->ops->init_instance(&vport->roce);
4643 if (ret)
4644 goto err;
4645 }
4646 }
4647 }
4648
4649 return 0;
4650err:
4651 return ret;
4652}
4653
4654static void hclge_uninit_client_instance(struct hnae3_client *client,
4655 struct hnae3_ae_dev *ae_dev)
4656{
4657 struct hclge_dev *hdev = ae_dev->priv;
4658 struct hclge_vport *vport;
4659 int i;
4660
4661 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4662 vport = &hdev->vport[i];
a17dcf3f 4663 if (hdev->roce_client) {
46a3df9f
S
4664 hdev->roce_client->ops->uninit_instance(&vport->roce,
4665 0);
a17dcf3f
L
4666 hdev->roce_client = NULL;
4667 vport->roce.client = NULL;
4668 }
46a3df9f
S
4669 if (client->type == HNAE3_CLIENT_ROCE)
4670 return;
a17dcf3f 4671 if (client->ops->uninit_instance) {
46a3df9f 4672 client->ops->uninit_instance(&vport->nic, 0);
a17dcf3f
L
4673 hdev->nic_client = NULL;
4674 vport->nic.client = NULL;
4675 }
46a3df9f
S
4676 }
4677}
4678
4679static int hclge_pci_init(struct hclge_dev *hdev)
4680{
4681 struct pci_dev *pdev = hdev->pdev;
4682 struct hclge_hw *hw;
4683 int ret;
4684
4685 ret = pci_enable_device(pdev);
4686 if (ret) {
4687 dev_err(&pdev->dev, "failed to enable PCI device\n");
4688 goto err_no_drvdata;
4689 }
4690
4691 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4692 if (ret) {
4693 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4694 if (ret) {
4695 dev_err(&pdev->dev,
4696 "can't set consistent PCI DMA");
4697 goto err_disable_device;
4698 }
4699 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4700 }
4701
4702 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4703 if (ret) {
4704 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4705 goto err_disable_device;
4706 }
4707
4708 pci_set_master(pdev);
4709 hw = &hdev->hw;
4710 hw->back = hdev;
4711 hw->io_base = pcim_iomap(pdev, 2, 0);
4712 if (!hw->io_base) {
4713 dev_err(&pdev->dev, "Can't map configuration register space\n");
4714 ret = -ENOMEM;
4715 goto err_clr_master;
4716 }
4717
709eb41a
L
4718 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
4719
46a3df9f
S
4720 return 0;
4721err_clr_master:
4722 pci_clear_master(pdev);
4723 pci_release_regions(pdev);
4724err_disable_device:
4725 pci_disable_device(pdev);
4726err_no_drvdata:
4727 pci_set_drvdata(pdev, NULL);
4728
4729 return ret;
4730}
4731
4732static void hclge_pci_uninit(struct hclge_dev *hdev)
4733{
4734 struct pci_dev *pdev = hdev->pdev;
4735
887c3820 4736 pci_free_irq_vectors(pdev);
46a3df9f
S
4737 pci_clear_master(pdev);
4738 pci_release_mem_regions(pdev);
4739 pci_disable_device(pdev);
4740}
4741
4742static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4743{
4744 struct pci_dev *pdev = ae_dev->pdev;
46a3df9f
S
4745 struct hclge_dev *hdev;
4746 int ret;
4747
4748 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4749 if (!hdev) {
4750 ret = -ENOMEM;
4751 goto err_hclge_dev;
4752 }
4753
46a3df9f
S
4754 hdev->pdev = pdev;
4755 hdev->ae_dev = ae_dev;
4ed340ab 4756 hdev->reset_type = HNAE3_NONE_RESET;
ed4a1bb8 4757 hdev->reset_request = 0;
202f2014 4758 hdev->reset_pending = 0;
46a3df9f
S
4759 ae_dev->priv = hdev;
4760
46a3df9f
S
4761 ret = hclge_pci_init(hdev);
4762 if (ret) {
4763 dev_err(&pdev->dev, "PCI init failed\n");
4764 goto err_pci_init;
4765 }
4766
3efb960f
L
4767 /* Firmware command queue initialize */
4768 ret = hclge_cmd_queue_init(hdev);
4769 if (ret) {
4770 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
4771 return ret;
4772 }
4773
4774 /* Firmware command initialize */
46a3df9f
S
4775 ret = hclge_cmd_init(hdev);
4776 if (ret)
4777 goto err_cmd_init;
4778
4779 ret = hclge_get_cap(hdev);
4780 if (ret) {
e00e2197
CIK
4781 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4782 ret);
46a3df9f
S
4783 return ret;
4784 }
4785
4786 ret = hclge_configure(hdev);
4787 if (ret) {
4788 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4789 return ret;
4790 }
4791
887c3820 4792 ret = hclge_init_msi(hdev);
46a3df9f 4793 if (ret) {
887c3820 4794 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
46a3df9f
S
4795 return ret;
4796 }
4797
466b0c00
L
4798 ret = hclge_misc_irq_init(hdev);
4799 if (ret) {
4800 dev_err(&pdev->dev,
4801 "Misc IRQ(vector0) init error, ret = %d.\n",
4802 ret);
4803 return ret;
4804 }
4805
46a3df9f
S
4806 ret = hclge_alloc_tqps(hdev);
4807 if (ret) {
4808 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4809 return ret;
4810 }
4811
4812 ret = hclge_alloc_vport(hdev);
4813 if (ret) {
4814 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4815 return ret;
4816 }
4817
7df7dad6
L
4818 ret = hclge_map_tqp(hdev);
4819 if (ret) {
4820 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
4821 return ret;
4822 }
4823
cf9cca2d 4824 ret = hclge_mac_mdio_config(hdev);
4825 if (ret) {
4826 dev_warn(&hdev->pdev->dev,
4827 "mdio config fail ret=%d\n", ret);
4828 return ret;
4829 }
4830
46a3df9f
S
4831 ret = hclge_mac_init(hdev);
4832 if (ret) {
4833 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4834 return ret;
4835 }
4836 ret = hclge_buffer_alloc(hdev);
4837 if (ret) {
4838 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4839 return ret;
4840 }
4841
4842 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4843 if (ret) {
4844 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4845 return ret;
4846 }
4847
46a3df9f
S
4848 ret = hclge_init_vlan_config(hdev);
4849 if (ret) {
4850 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4851 return ret;
4852 }
4853
4854 ret = hclge_tm_schd_init(hdev);
4855 if (ret) {
4856 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4857 return ret;
68ece54e
YL
4858 }
4859
4860 ret = hclge_rss_init_hw(hdev);
4861 if (ret) {
4862 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4863 return ret;
46a3df9f
S
4864 }
4865
cacde272
YL
4866 hclge_dcb_ops_set(hdev);
4867
d039ef68 4868 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
46a3df9f 4869 INIT_WORK(&hdev->service_task, hclge_service_task);
ed4a1bb8 4870 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
22fd3468 4871 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
46a3df9f 4872
466b0c00
L
4873 /* Enable MISC vector(vector0) */
4874 hclge_enable_vector(&hdev->misc_vector, true);
4875
46a3df9f
S
4876 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4877 set_bit(HCLGE_STATE_DOWN, &hdev->state);
ed4a1bb8
SM
4878 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
4879 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
22fd3468
SM
4880 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
4881 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
46a3df9f
S
4882
4883 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4884 return 0;
4885
4886err_cmd_init:
4887 pci_release_regions(pdev);
4888err_pci_init:
4889 pci_set_drvdata(pdev, NULL);
4890err_hclge_dev:
4891 return ret;
4892}
4893
c6dc5213 4894static void hclge_stats_clear(struct hclge_dev *hdev)
4895{
4896 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
4897}
4898
4ed340ab
L
4899static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
4900{
4901 struct hclge_dev *hdev = ae_dev->priv;
4902 struct pci_dev *pdev = ae_dev->pdev;
4903 int ret;
4904
4905 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4906
c6dc5213 4907 hclge_stats_clear(hdev);
4908
4ed340ab
L
4909 ret = hclge_cmd_init(hdev);
4910 if (ret) {
4911 dev_err(&pdev->dev, "Cmd queue init failed\n");
4912 return ret;
4913 }
4914
4915 ret = hclge_get_cap(hdev);
4916 if (ret) {
4917 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4918 ret);
4919 return ret;
4920 }
4921
4922 ret = hclge_configure(hdev);
4923 if (ret) {
4924 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4925 return ret;
4926 }
4927
4928 ret = hclge_map_tqp(hdev);
4929 if (ret) {
4930 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
4931 return ret;
4932 }
4933
4934 ret = hclge_mac_init(hdev);
4935 if (ret) {
4936 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4937 return ret;
4938 }
4939
4940 ret = hclge_buffer_alloc(hdev);
4941 if (ret) {
4942 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4943 return ret;
4944 }
4945
4946 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4947 if (ret) {
4948 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4949 return ret;
4950 }
4951
4952 ret = hclge_init_vlan_config(hdev);
4953 if (ret) {
4954 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4955 return ret;
4956 }
4957
4958 ret = hclge_tm_schd_init(hdev);
4959 if (ret) {
4960 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4961 return ret;
4962 }
4963
4964 ret = hclge_rss_init_hw(hdev);
4965 if (ret) {
4966 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4967 return ret;
4968 }
4969
4970 /* Enable MISC vector(vector0) */
4971 hclge_enable_vector(&hdev->misc_vector, true);
4972
4973 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
4974 HCLGE_DRIVER_NAME);
4975
4976 return 0;
4977}
4978
46a3df9f
S
4979static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4980{
4981 struct hclge_dev *hdev = ae_dev->priv;
4982 struct hclge_mac *mac = &hdev->hw.mac;
4983
4984 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4985
2a32ca13
AB
4986 if (IS_ENABLED(CONFIG_PCI_IOV))
4987 hclge_disable_sriov(hdev);
46a3df9f 4988
d039ef68 4989 if (hdev->service_timer.function)
46a3df9f
S
4990 del_timer_sync(&hdev->service_timer);
4991 if (hdev->service_task.func)
4992 cancel_work_sync(&hdev->service_task);
ed4a1bb8
SM
4993 if (hdev->rst_service_task.func)
4994 cancel_work_sync(&hdev->rst_service_task);
22fd3468
SM
4995 if (hdev->mbx_service_task.func)
4996 cancel_work_sync(&hdev->mbx_service_task);
46a3df9f
S
4997
4998 if (mac->phydev)
4999 mdiobus_unregister(mac->mdio_bus);
5000
466b0c00
L
5001 /* Disable MISC vector(vector0) */
5002 hclge_enable_vector(&hdev->misc_vector, false);
46a3df9f 5003 hclge_destroy_cmd_queue(&hdev->hw);
202f2014 5004 hclge_misc_irq_uninit(hdev);
46a3df9f
S
5005 hclge_pci_uninit(hdev);
5006 ae_dev->priv = NULL;
5007}
5008
4f645a90
PL
5009static u32 hclge_get_max_channels(struct hnae3_handle *handle)
5010{
5011 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5012 struct hclge_vport *vport = hclge_get_vport(handle);
5013 struct hclge_dev *hdev = vport->back;
5014
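	/* The usable channel count is bounded both by the RSS capacity per
	 * TC and by the number of TQPs this function actually owns.
	 */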
5015 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
5016}
5017
5018static void hclge_get_channels(struct hnae3_handle *handle,
5019 struct ethtool_channels *ch)
5020{
5021 struct hclge_vport *vport = hclge_get_vport(handle);
5022
5023 ch->max_combined = hclge_get_max_channels(handle);
5024 ch->other_count = 1;
5025 ch->max_other = 1;
5026 ch->combined_count = vport->alloc_tqps;
5027}
5028
f1f779ce
PL
5029static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5030 u16 *free_tqps, u16 *max_rss_size)
5031{
5032 struct hclge_vport *vport = hclge_get_vport(handle);
5033 struct hclge_dev *hdev = vport->back;
5034 u16 temp_tqps = 0;
5035 int i;
5036
5037 for (i = 0; i < hdev->num_tqps; i++) {
5038 if (!hdev->htqp[i].alloced)
5039 temp_tqps++;
5040 }
5041 *free_tqps = temp_tqps;
5042 *max_rss_size = hdev->rss_size_max;
5043}
5044
5045static void hclge_release_tqp(struct hclge_vport *vport)
5046{
5047 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5048 struct hclge_dev *hdev = vport->back;
5049 int i;
5050
5051 for (i = 0; i < kinfo->num_tqps; i++) {
5052 struct hclge_tqp *tqp =
5053 container_of(kinfo->tqp[i], struct hclge_tqp, q);
5054
5055 tqp->q.handle = NULL;
5056 tqp->q.tqp_index = 0;
5057 tqp->alloced = false;
5058 }
5059
5060 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
5061 kinfo->tqp = NULL;
5062}
5063
5064static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
5065{
5066 struct hclge_vport *vport = hclge_get_vport(handle);
5067 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5068 struct hclge_dev *hdev = vport->back;
5069 int cur_rss_size = kinfo->rss_size;
5070 int cur_tqps = kinfo->num_tqps;
5071 u16 tc_offset[HCLGE_MAX_TC_NUM];
5072 u16 tc_valid[HCLGE_MAX_TC_NUM];
5073 u16 tc_size[HCLGE_MAX_TC_NUM];
5074 u16 roundup_size;
5075 u32 *rss_indir;
5076 int ret, i;
5077
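	/* Changing the channel count redoes the whole queue setup: release
	 * the old TQPs, recreate and remap them, rerun scheduler init, then
	 * rebuild the RSS TC mode and indirection table.
	 */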
5078 hclge_release_tqp(vport);
5079
5080 ret = hclge_knic_setup(vport, new_tqps_num);
5081 if (ret) {
5082 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
5083 return ret;
5084 }
5085
5086 ret = hclge_map_tqp_to_vport(hdev, vport);
5087 if (ret) {
5088 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
5089 return ret;
5090 }
5091
5092 ret = hclge_tm_schd_init(hdev);
5093 if (ret) {
5094 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
5095 return ret;
5096 }
5097
5098 roundup_size = roundup_pow_of_two(kinfo->rss_size);
5099 roundup_size = ilog2(roundup_size);
5100 /* Set the RSS TC mode according to the new RSS size */
5101 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5102 tc_valid[i] = 0;
5103
5104 if (!(hdev->hw_tc_map & BIT(i)))
5105 continue;
5106
5107 tc_valid[i] = 1;
5108 tc_size[i] = roundup_size;
5109 tc_offset[i] = kinfo->rss_size * i;
5110 }
5111 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5112 if (ret)
5113 return ret;
5114
5115 /* Reinitialize the RSS indirection table according to the new RSS size */
5116 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
5117 if (!rss_indir)
5118 return -ENOMEM;
5119
5120 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
5121 rss_indir[i] = i % kinfo->rss_size;
5122
5123 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
5124 if (ret)
5125 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
5126 ret);
5127
5128 kfree(rss_indir);
5129
5130 if (!ret)
5131 dev_info(&hdev->pdev->dev,
5132 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
5133 cur_rss_size, kinfo->rss_size,
5134 cur_tqps, kinfo->rss_size * kinfo->num_tc);
5135
5136 return ret;
5137}
5138
46a3df9f
S
5139static const struct hnae3_ae_ops hclge_ops = {
5140 .init_ae_dev = hclge_init_ae_dev,
5141 .uninit_ae_dev = hclge_uninit_ae_dev,
5142 .init_client_instance = hclge_init_client_instance,
5143 .uninit_client_instance = hclge_uninit_client_instance,
63d7e66f
SM
5144 .map_ring_to_vector = hclge_map_ring_to_vector,
5145 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
46a3df9f
S
5146 .get_vector = hclge_get_vector,
5147 .set_promisc_mode = hclge_set_promisc_mode,
c39c4d98 5148 .set_loopback = hclge_set_loopback,
46a3df9f
S
5149 .start = hclge_ae_start,
5150 .stop = hclge_ae_stop,
5151 .get_status = hclge_get_status,
5152 .get_ksettings_an_result = hclge_get_ksettings_an_result,
5153 .update_speed_duplex_h = hclge_update_speed_duplex_h,
5154 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
5155 .get_media_type = hclge_get_media_type,
5156 .get_rss_key_size = hclge_get_rss_key_size,
5157 .get_rss_indir_size = hclge_get_rss_indir_size,
5158 .get_rss = hclge_get_rss,
5159 .set_rss = hclge_set_rss,
f7db940a 5160 .set_rss_tuple = hclge_set_rss_tuple,
07d29954 5161 .get_rss_tuple = hclge_get_rss_tuple,
46a3df9f
S
5162 .get_tc_size = hclge_get_tc_size,
5163 .get_mac_addr = hclge_get_mac_addr,
5164 .set_mac_addr = hclge_set_mac_addr,
5165 .add_uc_addr = hclge_add_uc_addr,
5166 .rm_uc_addr = hclge_rm_uc_addr,
5167 .add_mc_addr = hclge_add_mc_addr,
5168 .rm_mc_addr = hclge_rm_mc_addr,
5169 .set_autoneg = hclge_set_autoneg,
5170 .get_autoneg = hclge_get_autoneg,
5171 .get_pauseparam = hclge_get_pauseparam,
5172 .set_mtu = hclge_set_mtu,
5173 .reset_queue = hclge_reset_tqp,
5174 .get_stats = hclge_get_stats,
5175 .update_stats = hclge_update_stats,
5176 .get_strings = hclge_get_strings,
5177 .get_sset_count = hclge_get_sset_count,
5178 .get_fw_version = hclge_get_fw_version,
5179 .get_mdix_mode = hclge_get_mdix_mode,
5180 .set_vlan_filter = hclge_set_port_vlan_filter,
5181 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4ed340ab 5182 .reset_event = hclge_reset_event,
f1f779ce
PL
5183 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
5184 .set_channels = hclge_set_channels,
4f645a90 5185 .get_channels = hclge_get_channels,
46a3df9f
S
5186};
5187
5188static struct hnae3_ae_algo ae_algo = {
5189 .ops = &hclge_ops,
5190 .name = HCLGE_NAME,
5191 .pdev_id_table = ae_algo_pci_tbl,
5192};
5193
5194static int hclge_init(void)
5195{
5196 pr_info("%s is initializing\n", HCLGE_NAME);
5197
5198 return hnae3_register_ae_algo(&ae_algo);
5199}
5200
5201static void hclge_exit(void)
5202{
5203 hnae3_unregister_ae_algo(&ae_algo);
5204}
5205module_init(hclge_init);
5206module_exit(hclge_exit);
5207
5208MODULE_LICENSE("GPL");
5209MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
5210MODULE_DESCRIPTION("HCLGE Driver");
5211MODULE_VERSION(HCLGE_MOD_VERSION);