drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/acpi.h>
11#include <linux/device.h>
12#include <linux/etherdevice.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/pci.h>
19#include <linux/platform_device.h>
20
21#include "hclge_cmd.h"
22#include "hclge_main.h"
23#include "hclge_mdio.h"
24#include "hclge_tm.h"
25#include "hnae3.h"
26
27#define HCLGE_NAME "hclge"
28#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
29#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
31#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
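/* HCLGE_STATS_READ() reads a u64 counter at a byte offset inside a stats
 * struct, and the *_STATS_FIELD_OFF() helpers turn a field name into that
 * offset.  A table entry such as
 *   {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}
 * can therefore be read generically by hclge_comm_get_stats() below.
 */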
32
33static int hclge_rss_init_hw(struct hclge_dev *hdev);
34static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
35 enum hclge_mta_dmac_sel_type mta_mac_sel,
36 bool enable);
37static int hclge_init_vlan_config(struct hclge_dev *hdev);
38
39static struct hnae3_ae_algo ae_algo;
40
41static const struct pci_device_id ae_algo_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
 49 /* required last entry */
50 {0, }
51};
52
53static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
54 "Mac Loopback test",
55 "Serdes Loopback test",
56 "Phy Loopback test"
57};
58
59static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
60 {"igu_rx_oversize_pkt",
61 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
62 {"igu_rx_undersize_pkt",
63 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
64 {"igu_rx_out_all_pkt",
65 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
66 {"igu_rx_uni_pkt",
67 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
68 {"igu_rx_multi_pkt",
69 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
70 {"igu_rx_broad_pkt",
71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
72 {"egu_tx_out_all_pkt",
73 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
74 {"egu_tx_uni_pkt",
75 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
76 {"egu_tx_multi_pkt",
77 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
78 {"egu_tx_broad_pkt",
79 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
80 {"ssu_ppp_mac_key_num",
81 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
82 {"ssu_ppp_host_key_num",
83 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
84 {"ppp_ssu_mac_rlt_num",
85 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
86 {"ppp_ssu_host_rlt_num",
87 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
88 {"ssu_tx_in_num",
89 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
90 {"ssu_tx_out_num",
91 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
92 {"ssu_rx_in_num",
93 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
94 {"ssu_rx_out_num",
95 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
96};
97
98static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
99 {"igu_rx_err_pkt",
100 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
101 {"igu_rx_no_eof_pkt",
102 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
103 {"igu_rx_no_sof_pkt",
104 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
105 {"egu_tx_1588_pkt",
106 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
107 {"ssu_full_drop_num",
108 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
109 {"ssu_part_drop_num",
110 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
111 {"ppp_key_drop_num",
112 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
113 {"ppp_rlt_drop_num",
114 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
115 {"ssu_key_drop_num",
116 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
117 {"pkt_curr_buf_cnt",
118 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
119 {"qcn_fb_rcv_cnt",
120 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
121 {"qcn_fb_drop_cnt",
122 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
123 {"qcn_fb_invaild_cnt",
124 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
125 {"rx_packet_tc0_in_cnt",
126 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
127 {"rx_packet_tc1_in_cnt",
128 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
129 {"rx_packet_tc2_in_cnt",
130 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
131 {"rx_packet_tc3_in_cnt",
132 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
133 {"rx_packet_tc4_in_cnt",
134 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
135 {"rx_packet_tc5_in_cnt",
136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
137 {"rx_packet_tc6_in_cnt",
138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
139 {"rx_packet_tc7_in_cnt",
140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
141 {"rx_packet_tc0_out_cnt",
142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
143 {"rx_packet_tc1_out_cnt",
144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
145 {"rx_packet_tc2_out_cnt",
146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
147 {"rx_packet_tc3_out_cnt",
148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
149 {"rx_packet_tc4_out_cnt",
150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
151 {"rx_packet_tc5_out_cnt",
152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
153 {"rx_packet_tc6_out_cnt",
154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
155 {"rx_packet_tc7_out_cnt",
156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
157 {"tx_packet_tc0_in_cnt",
158 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
159 {"tx_packet_tc1_in_cnt",
160 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
161 {"tx_packet_tc2_in_cnt",
162 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
163 {"tx_packet_tc3_in_cnt",
164 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
165 {"tx_packet_tc4_in_cnt",
166 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
167 {"tx_packet_tc5_in_cnt",
168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
169 {"tx_packet_tc6_in_cnt",
170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
171 {"tx_packet_tc7_in_cnt",
172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
173 {"tx_packet_tc0_out_cnt",
174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
175 {"tx_packet_tc1_out_cnt",
176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
177 {"tx_packet_tc2_out_cnt",
178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
179 {"tx_packet_tc3_out_cnt",
180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
181 {"tx_packet_tc4_out_cnt",
182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
183 {"tx_packet_tc5_out_cnt",
184 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
185 {"tx_packet_tc6_out_cnt",
186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
187 {"tx_packet_tc7_out_cnt",
188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
189 {"pkt_curr_buf_tc0_cnt",
190 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
191 {"pkt_curr_buf_tc1_cnt",
192 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
193 {"pkt_curr_buf_tc2_cnt",
194 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
195 {"pkt_curr_buf_tc3_cnt",
196 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
197 {"pkt_curr_buf_tc4_cnt",
198 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
199 {"pkt_curr_buf_tc5_cnt",
200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
201 {"pkt_curr_buf_tc6_cnt",
202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
203 {"pkt_curr_buf_tc7_cnt",
204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
205 {"mb_uncopy_num",
206 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
207 {"lo_pri_unicast_rlt_drop_num",
208 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
209 {"hi_pri_multicast_rlt_drop_num",
210 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
211 {"lo_pri_multicast_rlt_drop_num",
212 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
213 {"rx_oq_drop_pkt_cnt",
214 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
215 {"tx_oq_drop_pkt_cnt",
216 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
217 {"nic_l2_err_drop_pkt_cnt",
218 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
219 {"roc_l2_err_drop_pkt_cnt",
220 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
221};
222
223static const struct hclge_comm_stats_str g_mac_stats_string[] = {
224 {"mac_tx_mac_pause_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
226 {"mac_rx_mac_pause_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
228 {"mac_tx_pfc_pri0_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
230 {"mac_tx_pfc_pri1_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
232 {"mac_tx_pfc_pri2_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
234 {"mac_tx_pfc_pri3_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
236 {"mac_tx_pfc_pri4_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
238 {"mac_tx_pfc_pri5_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
240 {"mac_tx_pfc_pri6_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
242 {"mac_tx_pfc_pri7_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
244 {"mac_rx_pfc_pri0_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
246 {"mac_rx_pfc_pri1_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
248 {"mac_rx_pfc_pri2_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
250 {"mac_rx_pfc_pri3_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
252 {"mac_rx_pfc_pri4_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
254 {"mac_rx_pfc_pri5_pkt_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
256 {"mac_rx_pfc_pri6_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
258 {"mac_rx_pfc_pri7_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
260 {"mac_tx_total_pkt_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
262 {"mac_tx_total_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
264 {"mac_tx_good_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
266 {"mac_tx_bad_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
268 {"mac_tx_good_oct_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
270 {"mac_tx_bad_oct_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
272 {"mac_tx_uni_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
274 {"mac_tx_multi_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
276 {"mac_tx_broad_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
278 {"mac_tx_undersize_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
280 {"mac_tx_overrsize_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
282 {"mac_tx_64_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
284 {"mac_tx_65_127_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
286 {"mac_tx_128_255_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
288 {"mac_tx_256_511_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
290 {"mac_tx_512_1023_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
292 {"mac_tx_1024_1518_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
294 {"mac_tx_1519_max_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
296 {"mac_rx_total_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
298 {"mac_rx_total_oct_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
300 {"mac_rx_good_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
302 {"mac_rx_bad_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
304 {"mac_rx_good_oct_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
306 {"mac_rx_bad_oct_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
308 {"mac_rx_uni_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
310 {"mac_rx_multi_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
312 {"mac_rx_broad_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
314 {"mac_rx_undersize_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
316 {"mac_rx_overrsize_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
318 {"mac_rx_64_oct_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
320 {"mac_rx_65_127_oct_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
322 {"mac_rx_128_255_oct_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
324 {"mac_rx_256_511_oct_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
326 {"mac_rx_512_1023_oct_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
328 {"mac_rx_1024_1518_oct_pkt_num",
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
330 {"mac_rx_1519_max_oct_pkt_num",
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
332
333 {"mac_trans_fragment_pkt_num",
334 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
335 {"mac_trans_undermin_pkt_num",
336 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
337 {"mac_trans_jabber_pkt_num",
338 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
339 {"mac_trans_err_all_pkt_num",
340 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
341 {"mac_trans_from_app_good_pkt_num",
342 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
343 {"mac_trans_from_app_bad_pkt_num",
344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
345 {"mac_rcv_fragment_pkt_num",
346 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
347 {"mac_rcv_undermin_pkt_num",
348 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
349 {"mac_rcv_jabber_pkt_num",
350 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
351 {"mac_rcv_fcs_err_pkt_num",
352 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
353 {"mac_rcv_send_app_good_pkt_num",
354 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
355 {"mac_rcv_send_app_bad_pkt_num",
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
357};
358
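/* The 64-bit, 32-bit and MAC statistics below are each fetched with a single
 * multi-descriptor firmware query.  The parsing loops deliberately pull fewer
 * data words out of desc[0] than out of the following descriptors (whose
 * header area is reused as data) before accumulating the counters into
 * hdev->hw_stats.
 */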
359static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
360{
361#define HCLGE_64_BIT_CMD_NUM 5
362#define HCLGE_64_BIT_RTN_DATANUM 4
363 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
364 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
365 u64 *desc_data;
366 int i, k, n;
367 int ret;
368
369 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
370 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
371 if (ret) {
372 dev_err(&hdev->pdev->dev,
373 "Get 64 bit pkt stats fail, status = %d.\n", ret);
374 return ret;
375 }
376
377 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
378 if (unlikely(i == 0)) {
379 desc_data = (u64 *)(&desc[i].data[0]);
380 n = HCLGE_64_BIT_RTN_DATANUM - 1;
381 } else {
382 desc_data = (u64 *)(&desc[i]);
383 n = HCLGE_64_BIT_RTN_DATANUM;
384 }
385 for (k = 0; k < n; k++) {
386 *data++ += cpu_to_le64(*desc_data);
387 desc_data++;
388 }
389 }
390
391 return 0;
392}
393
394static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
395{
396 stats->pkt_curr_buf_cnt = 0;
397 stats->pkt_curr_buf_tc0_cnt = 0;
398 stats->pkt_curr_buf_tc1_cnt = 0;
399 stats->pkt_curr_buf_tc2_cnt = 0;
400 stats->pkt_curr_buf_tc3_cnt = 0;
401 stats->pkt_curr_buf_tc4_cnt = 0;
402 stats->pkt_curr_buf_tc5_cnt = 0;
403 stats->pkt_curr_buf_tc6_cnt = 0;
404 stats->pkt_curr_buf_tc7_cnt = 0;
405}
406
407static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
408{
409#define HCLGE_32_BIT_CMD_NUM 8
410#define HCLGE_32_BIT_RTN_DATANUM 8
411
412 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
413 struct hclge_32_bit_stats *all_32_bit_stats;
414 u32 *desc_data;
415 int i, k, n;
416 u64 *data;
417 int ret;
418
419 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
420 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
421
422 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
423 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
424 if (ret) {
425 dev_err(&hdev->pdev->dev,
426 "Get 32 bit pkt stats fail, status = %d.\n", ret);
427
428 return ret;
429 }
430
431 hclge_reset_partial_32bit_counter(all_32_bit_stats);
432 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
433 if (unlikely(i == 0)) {
434 all_32_bit_stats->igu_rx_err_pkt +=
435 cpu_to_le32(desc[i].data[0]);
436 all_32_bit_stats->igu_rx_no_eof_pkt +=
437 cpu_to_le32(desc[i].data[1] & 0xffff);
438 all_32_bit_stats->igu_rx_no_sof_pkt +=
439 cpu_to_le32((desc[i].data[1] >> 16) & 0xffff);
440
441 desc_data = (u32 *)(&desc[i].data[2]);
442 n = HCLGE_32_BIT_RTN_DATANUM - 4;
443 } else {
444 desc_data = (u32 *)(&desc[i]);
445 n = HCLGE_32_BIT_RTN_DATANUM;
446 }
447 for (k = 0; k < n; k++) {
448 *data++ += cpu_to_le32(*desc_data);
449 desc_data++;
450 }
451 }
452
453 return 0;
454}
455
456static int hclge_mac_update_stats(struct hclge_dev *hdev)
457{
458#define HCLGE_MAC_CMD_NUM 17
459#define HCLGE_RTN_DATA_NUM 4
460
461 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
462 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
463 u64 *desc_data;
464 int i, k, n;
465 int ret;
466
467 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
468 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
469 if (ret) {
470 dev_err(&hdev->pdev->dev,
471 "Get MAC pkt stats fail, status = %d.\n", ret);
472
473 return ret;
474 }
475
476 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
477 if (unlikely(i == 0)) {
478 desc_data = (u64 *)(&desc[i].data[0]);
479 n = HCLGE_RTN_DATA_NUM - 2;
480 } else {
481 desc_data = (u64 *)(&desc[i]);
482 n = HCLGE_RTN_DATA_NUM;
483 }
484 for (k = 0; k < n; k++) {
485 *data++ += cpu_to_le64(*desc_data);
486 desc_data++;
487 }
488 }
489
490 return 0;
491}
492
493static int hclge_tqps_update_stats(struct hnae3_handle *handle)
494{
495 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
496 struct hclge_vport *vport = hclge_get_vport(handle);
497 struct hclge_dev *hdev = vport->back;
498 struct hnae3_queue *queue;
499 struct hclge_desc desc[1];
500 struct hclge_tqp *tqp;
501 int ret, i;
502
503 for (i = 0; i < kinfo->num_tqps; i++) {
504 queue = handle->kinfo.tqp[i];
505 tqp = container_of(queue, struct hclge_tqp, q);
 506 /* command : HCLGE_OPC_QUERY_RX_STATUS */
507 hclge_cmd_setup_basic_desc(&desc[0],
508 HCLGE_OPC_QUERY_RX_STATUS,
509 true);
510
511 desc[0].data[0] = (tqp->index & 0x1ff);
512 ret = hclge_cmd_send(&hdev->hw, desc, 1);
513 if (ret) {
514 dev_err(&hdev->pdev->dev,
515 "Query tqp stat fail, status = %d,queue = %d\n",
516 ret, i);
517 return ret;
518 }
519 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
520 cpu_to_le32(desc[0].data[4]);
521 }
522
523 for (i = 0; i < kinfo->num_tqps; i++) {
524 queue = handle->kinfo.tqp[i];
525 tqp = container_of(queue, struct hclge_tqp, q);
 526 /* command : HCLGE_OPC_QUERY_TX_STATUS */
527 hclge_cmd_setup_basic_desc(&desc[0],
528 HCLGE_OPC_QUERY_TX_STATUS,
529 true);
530
531 desc[0].data[0] = (tqp->index & 0x1ff);
532 ret = hclge_cmd_send(&hdev->hw, desc, 1);
533 if (ret) {
534 dev_err(&hdev->pdev->dev,
535 "Query tqp stat fail, status = %d,queue = %d\n",
536 ret, i);
537 return ret;
538 }
539 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
540 cpu_to_le32(desc[0].data[4]);
541 }
542
543 return 0;
544}
545
546static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
547{
548 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
549 struct hclge_tqp *tqp;
550 u64 *buff = data;
551 int i;
552
553 for (i = 0; i < kinfo->num_tqps; i++) {
554 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
555 *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd);
556 }
557
558 for (i = 0; i < kinfo->num_tqps; i++) {
559 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
560 *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd);
561 }
562
563 return buff;
564}
565
566static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
567{
568 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
569
570 return kinfo->num_tqps * (2);
571}
572
573static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
574{
575 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
576 u8 *buff = data;
577 int i = 0;
578
579 for (i = 0; i < kinfo->num_tqps; i++) {
580 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
581 struct hclge_tqp, q);
582 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
583 tqp->index);
584 buff = buff + ETH_GSTRING_LEN;
585 }
586
587 for (i = 0; i < kinfo->num_tqps; i++) {
588 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
589 struct hclge_tqp, q);
590 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
591 tqp->index);
592 buff = buff + ETH_GSTRING_LEN;
593 }
594
595 return buff;
596}
597
598static u64 *hclge_comm_get_stats(void *comm_stats,
599 const struct hclge_comm_stats_str strs[],
600 int size, u64 *data)
601{
602 u64 *buf = data;
603 u32 i;
604
605 for (i = 0; i < size; i++)
606 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
607
608 return buf + size;
609}
610
611static u8 *hclge_comm_get_strings(u32 stringset,
612 const struct hclge_comm_stats_str strs[],
613 int size, u8 *data)
614{
615 char *buff = (char *)data;
616 u32 i;
617
618 if (stringset != ETH_SS_STATS)
619 return buff;
620
621 for (i = 0; i < size; i++) {
622 snprintf(buff, ETH_GSTRING_LEN,
623 strs[i].desc);
624 buff = buff + ETH_GSTRING_LEN;
625 }
626
627 return (u8 *)buff;
628}
629
630static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
631 struct net_device_stats *net_stats)
632{
633 net_stats->tx_dropped = 0;
634 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
635 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
636 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
637
638 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
639 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
640 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
641 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
642 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
643 net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
644
645 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
646 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
647
648 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
649 net_stats->rx_length_errors =
650 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
651 net_stats->rx_length_errors +=
652 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
653 net_stats->rx_over_errors =
654 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
655}
656
657static void hclge_update_stats_for_all(struct hclge_dev *hdev)
658{
659 struct hnae3_handle *handle;
660 int status;
661
662 handle = &hdev->vport[0].nic;
663 if (handle->client) {
664 status = hclge_tqps_update_stats(handle);
665 if (status) {
666 dev_err(&hdev->pdev->dev,
667 "Update TQPS stats fail, status = %d.\n",
668 status);
669 }
670 }
671
672 status = hclge_mac_update_stats(hdev);
673 if (status)
674 dev_err(&hdev->pdev->dev,
675 "Update MAC stats fail, status = %d.\n", status);
676
677 status = hclge_32_bit_update_stats(hdev);
678 if (status)
679 dev_err(&hdev->pdev->dev,
680 "Update 32 bit stats fail, status = %d.\n",
681 status);
682
683 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
684}
685
686static void hclge_update_stats(struct hnae3_handle *handle,
687 struct net_device_stats *net_stats)
688{
689 struct hclge_vport *vport = hclge_get_vport(handle);
690 struct hclge_dev *hdev = vport->back;
691 struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
692 int status;
693
694 status = hclge_mac_update_stats(hdev);
695 if (status)
696 dev_err(&hdev->pdev->dev,
697 "Update MAC stats fail, status = %d.\n",
698 status);
699
700 status = hclge_32_bit_update_stats(hdev);
701 if (status)
702 dev_err(&hdev->pdev->dev,
703 "Update 32 bit stats fail, status = %d.\n",
704 status);
705
706 status = hclge_64_bit_update_stats(hdev);
707 if (status)
708 dev_err(&hdev->pdev->dev,
709 "Update 64 bit stats fail, status = %d.\n",
710 status);
711
712 status = hclge_tqps_update_stats(handle);
713 if (status)
714 dev_err(&hdev->pdev->dev,
715 "Update TQPS stats fail, status = %d.\n",
716 status);
717
718 hclge_update_netstat(hw_stats, net_stats);
719}
720
721static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722{
723#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
724
725 struct hclge_vport *vport = hclge_get_vport(handle);
726 struct hclge_dev *hdev = vport->back;
727 int count = 0;
728
729 /* Loopback test support rules:
 730 * mac: only GE mode supports it
 731 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
 732 * phy: only supported when a phy device exists on the board
733 */
734 if (stringset == ETH_SS_TEST) {
735 /* clear loopback bit flags at first */
736 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
737 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
738 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
739 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
740 count += 1;
741 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
742 } else {
743 count = -EOPNOTSUPP;
744 }
745 } else if (stringset == ETH_SS_STATS) {
746 count = ARRAY_SIZE(g_mac_stats_string) +
747 ARRAY_SIZE(g_all_32bit_stats_string) +
748 ARRAY_SIZE(g_all_64bit_stats_string) +
749 hclge_tqps_get_sset_count(handle, stringset);
750 }
751
752 return count;
753}
754
755static void hclge_get_strings(struct hnae3_handle *handle,
756 u32 stringset,
757 u8 *data)
758{
759 u8 *p = (char *)data;
760 int size;
761
762 if (stringset == ETH_SS_STATS) {
763 size = ARRAY_SIZE(g_mac_stats_string);
764 p = hclge_comm_get_strings(stringset,
765 g_mac_stats_string,
766 size,
767 p);
768 size = ARRAY_SIZE(g_all_32bit_stats_string);
769 p = hclge_comm_get_strings(stringset,
770 g_all_32bit_stats_string,
771 size,
772 p);
773 size = ARRAY_SIZE(g_all_64bit_stats_string);
774 p = hclge_comm_get_strings(stringset,
775 g_all_64bit_stats_string,
776 size,
777 p);
778 p = hclge_tqps_get_strings(handle, p);
779 } else if (stringset == ETH_SS_TEST) {
780 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
781 memcpy(p,
782 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
783 ETH_GSTRING_LEN);
784 p += ETH_GSTRING_LEN;
785 }
786 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
787 memcpy(p,
788 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
789 ETH_GSTRING_LEN);
790 p += ETH_GSTRING_LEN;
791 }
792 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
793 memcpy(p,
794 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
795 ETH_GSTRING_LEN);
796 p += ETH_GSTRING_LEN;
797 }
798 }
799}
800
801static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802{
803 struct hclge_vport *vport = hclge_get_vport(handle);
804 struct hclge_dev *hdev = vport->back;
805 u64 *p;
806
807 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
808 g_mac_stats_string,
809 ARRAY_SIZE(g_mac_stats_string),
810 data);
811 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
812 g_all_32bit_stats_string,
813 ARRAY_SIZE(g_all_32bit_stats_string),
814 p);
815 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
816 g_all_64bit_stats_string,
817 ARRAY_SIZE(g_all_64bit_stats_string),
818 p);
819 p = hclge_tqps_get_stats(handle, p);
820}
821
822static int hclge_parse_func_status(struct hclge_dev *hdev,
823 struct hclge_func_status *status)
824{
825 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
826 return -EINVAL;
827
828 /* Set the pf to main pf */
829 if (status->pf_state & HCLGE_PF_STATE_MAIN)
830 hdev->flag |= HCLGE_FLAG_MAIN;
831 else
832 hdev->flag &= ~HCLGE_FLAG_MAIN;
833
834 hdev->num_req_vfs = status->vf_num / status->pf_num;
835 return 0;
836}
837
838static int hclge_query_function_status(struct hclge_dev *hdev)
839{
840 struct hclge_func_status *req;
841 struct hclge_desc desc;
842 int timeout = 0;
843 int ret;
844
845 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
846 req = (struct hclge_func_status *)desc.data;
847
848 do {
849 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
850 if (ret) {
851 dev_err(&hdev->pdev->dev,
852 "query function status failed %d.\n",
853 ret);
854
855 return ret;
856 }
857
858 /* Check pf reset is done */
859 if (req->pf_state)
860 break;
861 usleep_range(1000, 2000);
862 } while (timeout++ < 5);
863
864 ret = hclge_parse_func_status(hdev, req);
865
866 return ret;
867}
868
869static int hclge_query_pf_resource(struct hclge_dev *hdev)
870{
871 struct hclge_pf_res *req;
872 struct hclge_desc desc;
873 int ret;
874
875 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
876 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
877 if (ret) {
878 dev_err(&hdev->pdev->dev,
879 "query pf resource failed %d.\n", ret);
880 return ret;
881 }
882
883 req = (struct hclge_pf_res *)desc.data;
884 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
885 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
886
 887 if (hnae3_dev_roce_supported(hdev)) {
888 hdev->num_roce_msix =
889 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
890 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
891
 892 /* PF should have NIC vectors and Roce vectors;
 893 * NIC vectors are queued before Roce vectors.
894 */
895 hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
896 } else {
897 hdev->num_msi =
898 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
899 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
900 }
901
902 return 0;
903}
904
905static int hclge_parse_speed(int speed_cmd, int *speed)
906{
907 switch (speed_cmd) {
908 case 6:
909 *speed = HCLGE_MAC_SPEED_10M;
910 break;
911 case 7:
912 *speed = HCLGE_MAC_SPEED_100M;
913 break;
914 case 0:
915 *speed = HCLGE_MAC_SPEED_1G;
916 break;
917 case 1:
918 *speed = HCLGE_MAC_SPEED_10G;
919 break;
920 case 2:
921 *speed = HCLGE_MAC_SPEED_25G;
922 break;
923 case 3:
924 *speed = HCLGE_MAC_SPEED_40G;
925 break;
926 case 4:
927 *speed = HCLGE_MAC_SPEED_50G;
928 break;
929 case 5:
930 *speed = HCLGE_MAC_SPEED_100G;
931 break;
932 default:
933 return -EINVAL;
934 }
935
936 return 0;
937}
938
939static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
940{
941 struct hclge_cfg_param *req;
942 u64 mac_addr_tmp_high;
943 u64 mac_addr_tmp;
944 int i;
945
946 req = (struct hclge_cfg_param *)desc[0].data;
947
948 /* get the configuration */
949 cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
950 HCLGE_CFG_VMDQ_M,
951 HCLGE_CFG_VMDQ_S);
952 cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
953 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
954 cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
955 HCLGE_CFG_TQP_DESC_N_M,
956 HCLGE_CFG_TQP_DESC_N_S);
957
958 cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
959 HCLGE_CFG_PHY_ADDR_M,
960 HCLGE_CFG_PHY_ADDR_S);
961 cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
962 HCLGE_CFG_MEDIA_TP_M,
963 HCLGE_CFG_MEDIA_TP_S);
964 cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
965 HCLGE_CFG_RX_BUF_LEN_M,
966 HCLGE_CFG_RX_BUF_LEN_S);
967 /* get mac_address */
968 mac_addr_tmp = __le32_to_cpu(req->param[2]);
969 mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
970 HCLGE_CFG_MAC_ADDR_H_M,
971 HCLGE_CFG_MAC_ADDR_H_S);
972
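	/* param[2] holds the low 32 bits of the MAC address and the field in
	 * param[3] holds the upper 16 bits; (x << 31) << 1 is just x << 32,
	 * so the two halves are combined into one 48-bit value before being
	 * copied byte by byte into cfg->mac_addr below.
	 */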
973 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
974
975 cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
976 HCLGE_CFG_DEFAULT_SPEED_M,
977 HCLGE_CFG_DEFAULT_SPEED_S);
978 for (i = 0; i < ETH_ALEN; i++)
979 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
980
981 req = (struct hclge_cfg_param *)desc[1].data;
982 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
983}
984
 985/* hclge_get_cfg: query the static parameters from flash
 986 * @hdev: pointer to struct hclge_dev
 987 * @hcfg: the config structure to be filled
 988 */
989static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
990{
991 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
992 struct hclge_cfg_param *req;
993 int i, ret;
994
995 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
996 req = (struct hclge_cfg_param *)desc[i].data;
997 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
998 true);
999 hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M,
1000 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
 1001 /* Len should be in units of 4 bytes when sent to hardware */
1002 hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M,
1003 HCLGE_CFG_RD_LEN_S,
1004 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1005 req->offset = cpu_to_le32(req->offset);
1006 }
1007
1008 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1009 if (ret) {
1010 dev_err(&hdev->pdev->dev,
1011 "get config failed %d.\n", ret);
1012 return ret;
1013 }
1014
1015 hclge_parse_cfg(hcfg, desc);
1016 return 0;
1017}
1018
1019static int hclge_get_cap(struct hclge_dev *hdev)
1020{
1021 int ret;
1022
1023 ret = hclge_query_function_status(hdev);
1024 if (ret) {
1025 dev_err(&hdev->pdev->dev,
1026 "query function status error %d.\n", ret);
1027 return ret;
1028 }
1029
1030 /* get pf resource */
1031 ret = hclge_query_pf_resource(hdev);
1032 if (ret) {
1033 dev_err(&hdev->pdev->dev,
1034 "query pf resource error %d.\n", ret);
1035 return ret;
1036 }
1037
1038 return 0;
1039}
1040
1041static int hclge_configure(struct hclge_dev *hdev)
1042{
1043 struct hclge_cfg cfg;
1044 int ret, i;
1045
1046 ret = hclge_get_cfg(hdev, &cfg);
1047 if (ret) {
1048 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1049 return ret;
1050 }
1051
1052 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1053 hdev->base_tqp_pid = 0;
1054 hdev->rss_size_max = 1;
1055 hdev->rx_buf_len = cfg.rx_buf_len;
 1056 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
 1057 hdev->hw.mac.media_type = cfg.media_type;
 1058 hdev->hw.mac.phy_addr = cfg.phy_addr;
1059 hdev->num_desc = cfg.tqp_desc_num;
1060 hdev->tm_info.num_pg = 1;
1061 hdev->tm_info.num_tc = cfg.tc_num;
1062 hdev->tm_info.hw_pfc_map = 0;
1063
1064 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1065 if (ret) {
1066 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1067 return ret;
1068 }
1069
1070 if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) ||
1071 (hdev->tm_info.num_tc < 1)) {
1072 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1073 hdev->tm_info.num_tc);
1074 hdev->tm_info.num_tc = 1;
1075 }
1076
 1077 /* Currently does not support non-contiguous tc */
1078 for (i = 0; i < cfg.tc_num; i++)
1079 hnae_set_bit(hdev->hw_tc_map, i, 1);
1080
1081 if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
1082 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1083 else
1084 hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
1085
1086 return ret;
1087}
1088
1089static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1090 int tso_mss_max)
1091{
1092 struct hclge_cfg_tso_status *req;
1093 struct hclge_desc desc;
1094
1095 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1096
1097 req = (struct hclge_cfg_tso_status *)desc.data;
1098 hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M,
1099 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1100 hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M,
1101 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1102
1103 return hclge_cmd_send(&hdev->hw, &desc, 1);
1104}
1105
1106static int hclge_alloc_tqps(struct hclge_dev *hdev)
1107{
1108 struct hclge_tqp *tqp;
1109 int i;
1110
1111 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1112 sizeof(struct hclge_tqp), GFP_KERNEL);
1113 if (!hdev->htqp)
1114 return -ENOMEM;
1115
1116 tqp = hdev->htqp;
1117
1118 for (i = 0; i < hdev->num_tqps; i++) {
1119 tqp->dev = &hdev->pdev->dev;
1120 tqp->index = i;
1121
1122 tqp->q.ae_algo = &ae_algo;
1123 tqp->q.buf_size = hdev->rx_buf_len;
1124 tqp->q.desc_num = hdev->num_desc;
1125 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1126 i * HCLGE_TQP_REG_SIZE;
1127
1128 tqp++;
1129 }
1130
1131 return 0;
1132}
1133
1134static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1135 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1136{
1137 struct hclge_tqp_map *req;
1138 struct hclge_desc desc;
1139 int ret;
1140
1141 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1142
1143 req = (struct hclge_tqp_map *)desc.data;
1144 req->tqp_id = cpu_to_le16(tqp_pid);
1145 req->tqp_vf = cpu_to_le16(func_id);
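	/* tqp_flag: the MAP_TYPE bit is 0 when the queue is mapped to the PF
	 * and 1 when it is mapped to a VF; the MAP_EN bit enables the mapping.
	 */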
1146 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1147 1 << HCLGE_TQP_MAP_EN_B;
1148 req->tqp_vid = cpu_to_le16(tqp_vid);
1149
1150 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1151 if (ret) {
1152 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
1153 ret);
1154 return ret;
1155 }
1156
1157 return 0;
1158}
1159
1160static int hclge_assign_tqp(struct hclge_vport *vport,
1161 struct hnae3_queue **tqp, u16 num_tqps)
1162{
1163 struct hclge_dev *hdev = vport->back;
1164 int i, alloced, func_id, ret;
1165 bool is_pf;
1166
1167 func_id = vport->vport_id;
1168 is_pf = (vport->vport_id == 0) ? true : false;
1169
1170 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1171 alloced < num_tqps; i++) {
1172 if (!hdev->htqp[i].alloced) {
1173 hdev->htqp[i].q.handle = &vport->nic;
1174 hdev->htqp[i].q.tqp_index = alloced;
1175 tqp[alloced] = &hdev->htqp[i].q;
1176 hdev->htqp[i].alloced = true;
1177 ret = hclge_map_tqps_to_func(hdev, func_id,
1178 hdev->htqp[i].index,
1179 alloced, is_pf);
1180 if (ret)
1181 return ret;
1182
1183 alloced++;
1184 }
1185 }
1186 vport->alloc_tqps = num_tqps;
1187
1188 return 0;
1189}
1190
1191static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
1192{
1193 struct hnae3_handle *nic = &vport->nic;
1194 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1195 struct hclge_dev *hdev = vport->back;
1196 int i, ret;
1197
1198 kinfo->num_desc = hdev->num_desc;
1199 kinfo->rx_buf_len = hdev->rx_buf_len;
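	/* Spread the queues evenly across the TCs in use: rss_size is the
	 * per-TC queue count (capped by rss_size_max) and num_tqps is then
	 * rounded down to a whole multiple of it.
	 */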
1200 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1201 kinfo->rss_size
1202 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1203 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1204
1205 for (i = 0; i < HNAE3_MAX_TC; i++) {
1206 if (hdev->hw_tc_map & BIT(i)) {
1207 kinfo->tc_info[i].enable = true;
1208 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1209 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1210 kinfo->tc_info[i].tc = i;
1211 } else {
 1212 /* Set to default queue if TC is disabled */
1213 kinfo->tc_info[i].enable = false;
1214 kinfo->tc_info[i].tqp_offset = 0;
1215 kinfo->tc_info[i].tqp_count = 1;
1216 kinfo->tc_info[i].tc = 0;
1217 }
1218 }
1219
1220 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1221 sizeof(struct hnae3_queue *), GFP_KERNEL);
1222 if (!kinfo->tqp)
1223 return -ENOMEM;
1224
1225 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
1226 if (ret) {
1227 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1228 return -EINVAL;
1229 }
1230
1231 return 0;
1232}
1233
1234static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1235{
1236 /* this would be initialized later */
1237}
1238
1239static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1240{
1241 struct hnae3_handle *nic = &vport->nic;
1242 struct hclge_dev *hdev = vport->back;
1243 int ret;
1244
1245 nic->pdev = hdev->pdev;
1246 nic->ae_algo = &ae_algo;
1247 nic->numa_node_mask = hdev->numa_node_mask;
1248
1249 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1250 ret = hclge_knic_setup(vport, num_tqps);
1251 if (ret) {
1252 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1253 ret);
1254 return ret;
1255 }
1256 } else {
1257 hclge_unic_setup(vport, num_tqps);
1258 }
1259
1260 return 0;
1261}
1262
1263static int hclge_alloc_vport(struct hclge_dev *hdev)
1264{
1265 struct pci_dev *pdev = hdev->pdev;
1266 struct hclge_vport *vport;
1267 u32 tqp_main_vport;
1268 u32 tqp_per_vport;
1269 int num_vport, i;
1270 int ret;
1271
1272 /* We need to alloc a vport for main NIC of PF */
1273 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1274
1275 if (hdev->num_tqps < num_vport)
1276 num_vport = hdev->num_tqps;
1277
1278 /* Alloc the same number of TQPs for every vport */
1279 tqp_per_vport = hdev->num_tqps / num_vport;
1280 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1281
1282 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1283 GFP_KERNEL);
1284 if (!vport)
1285 return -ENOMEM;
1286
1287 hdev->vport = vport;
1288 hdev->num_alloc_vport = num_vport;
1289
1290#ifdef CONFIG_PCI_IOV
1291 /* Enable SRIOV */
1292 if (hdev->num_req_vfs) {
1293 dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
1294 hdev->num_req_vfs);
1295 ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
1296 if (ret) {
1297 hdev->num_alloc_vfs = 0;
1298 dev_err(&pdev->dev, "SRIOV enable failed %d\n",
1299 ret);
1300 return ret;
1301 }
1302 }
1303 hdev->num_alloc_vfs = hdev->num_req_vfs;
1304#endif
1305
1306 for (i = 0; i < num_vport; i++) {
1307 vport->back = hdev;
1308 vport->vport_id = i;
1309
1310 if (i == 0)
1311 ret = hclge_vport_setup(vport, tqp_main_vport);
1312 else
1313 ret = hclge_vport_setup(vport, tqp_per_vport);
1314 if (ret) {
1315 dev_err(&pdev->dev,
1316 "vport setup failed for vport %d, %d\n",
1317 i, ret);
1318 return ret;
1319 }
1320
1321 vport++;
1322 }
1323
1324 return 0;
1325}
1326
1327static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev)
1328{
1329/* TX buffer size is in units of 128 bytes */
1330#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1331#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1332 struct hclge_tx_buff_alloc *req;
1333 struct hclge_desc desc;
1334 int ret;
1335 u8 i;
1336
1337 req = (struct hclge_tx_buff_alloc *)desc.data;
1338
1339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
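	/* Each TC's TX buffer size is handed to hardware in 128-byte units
	 * (HCLGE_BUF_SIZE_UNIT_SHIFT) with the update-enable bit set in the
	 * same 16-bit word.
	 */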
1340 for (i = 0; i < HCLGE_TC_NUM; i++) {
1341 u32 buf_size = hdev->priv_buf[i].tx_buf_size;
1342
1343 req->tx_pkt_buff[i] =
1344 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1345 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
 1346 }
1347
1348 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1349 if (ret) {
1350 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1351 ret);
1352 return ret;
1353 }
1354
1355 return 0;
1356}
1357
1358static int hclge_tx_buffer_alloc(struct hclge_dev *hdev)
1359{
 1360 int ret = hclge_cmd_alloc_tx_buff(hdev);
1361
1362 if (ret) {
1363 dev_err(&hdev->pdev->dev,
1364 "tx buffer alloc failed %d\n", ret);
1365 return ret;
1366 }
1367
1368 return 0;
1369}
1370
1371static int hclge_get_tc_num(struct hclge_dev *hdev)
1372{
1373 int i, cnt = 0;
1374
1375 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1376 if (hdev->hw_tc_map & BIT(i))
1377 cnt++;
1378 return cnt;
1379}
1380
1381static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1382{
1383 int i, cnt = 0;
1384
1385 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1386 if (hdev->hw_tc_map & BIT(i) &&
1387 hdev->tm_info.hw_pfc_map & BIT(i))
1388 cnt++;
1389 return cnt;
1390}
1391
1392/* Get the number of pfc enabled TCs, which have private buffer */
1393static int hclge_get_pfc_priv_num(struct hclge_dev *hdev)
1394{
1395 struct hclge_priv_buf *priv;
1396 int i, cnt = 0;
1397
1398 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1399 priv = &hdev->priv_buf[i];
1400 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1401 priv->enable)
1402 cnt++;
1403 }
1404
1405 return cnt;
1406}
1407
1408/* Get the number of pfc disabled TCs, which have private buffer */
1409static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev)
1410{
1411 struct hclge_priv_buf *priv;
1412 int i, cnt = 0;
1413
1414 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1415 priv = &hdev->priv_buf[i];
1416 if (hdev->hw_tc_map & BIT(i) &&
1417 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1418 priv->enable)
1419 cnt++;
1420 }
1421
1422 return cnt;
1423}
1424
1425static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev)
1426{
1427 struct hclge_priv_buf *priv;
1428 u32 rx_priv = 0;
1429 int i;
1430
1431 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1432 priv = &hdev->priv_buf[i];
1433 if (priv->enable)
1434 rx_priv += priv->buf_size;
1435 }
1436 return rx_priv;
1437}
1438
1439static u32 hclge_get_tx_buff_alloced(struct hclge_dev *hdev)
1440{
1441 u32 i, total_tx_size = 0;
1442
1443 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1444 total_tx_size += hdev->priv_buf[i].tx_buf_size;
1445
1446 return total_tx_size;
1447}
1448
1449static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
1450{
1451 u32 shared_buf_min, shared_buf_tc, shared_std;
1452 int tc_num, pfc_enable_num;
1453 u32 shared_buf;
1454 u32 rx_priv;
1455 int i;
1456
1457 tc_num = hclge_get_tc_num(hdev);
1458 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1459
1460 if (hnae3_dev_dcb_supported(hdev))
1461 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1462 else
1463 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1464
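	/* Estimated shared-buffer demand: one MPS-sized frame per PFC-enabled
	 * TC, half an MPS per remaining TC, plus one extra MPS; the shared
	 * buffer must also satisfy the DV-based minimum computed above.
	 */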
1465 shared_buf_tc = pfc_enable_num * hdev->mps +
1466 (tc_num - pfc_enable_num) * hdev->mps / 2 +
1467 hdev->mps;
1468 shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1469
1470 rx_priv = hclge_get_rx_priv_buff_alloced(hdev);
1471 if (rx_all <= rx_priv + shared_std)
1472 return false;
1473
1474 shared_buf = rx_all - rx_priv;
1475 hdev->s_buf.buf_size = shared_buf;
1476 hdev->s_buf.self.high = shared_buf;
1477 hdev->s_buf.self.low = 2 * hdev->mps;
1478
1479 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1480 if ((hdev->hw_tc_map & BIT(i)) &&
1481 (hdev->tm_info.hw_pfc_map & BIT(i))) {
1482 hdev->s_buf.tc_thrd[i].low = hdev->mps;
1483 hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1484 } else {
1485 hdev->s_buf.tc_thrd[i].low = 0;
1486 hdev->s_buf.tc_thrd[i].high = hdev->mps;
1487 }
1488 }
1489
1490 return true;
1491}
1492
1493static int hclge_tx_buffer_calc(struct hclge_dev *hdev)
1494{
1495 u32 i, total_size;
1496
1497 total_size = hdev->pkt_buf_size;
1498
1499 /* alloc tx buffer for all enabled tc */
1500 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1501 struct hclge_priv_buf *priv = &hdev->priv_buf[i];
1502
1503 if (total_size < HCLGE_DEFAULT_TX_BUF)
1504 return -ENOMEM;
1505
1506 if (hdev->hw_tc_map & BIT(i))
1507 priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1508 else
1509 priv->tx_buf_size = 0;
1510
1511 total_size -= priv->tx_buf_size;
1512 }
1513
1514 return 0;
1515}
1516
1517/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1518 * @hdev: pointer to struct hclge_dev
 1519 * @return: 0: calculation successful, negative: fail
1520 */
1521int hclge_rx_buffer_calc(struct hclge_dev *hdev)
1522{
 1523 u32 rx_all = hdev->pkt_buf_size;
1524 int no_pfc_priv_num, pfc_priv_num;
1525 struct hclge_priv_buf *priv;
1526 int i;
1527
1528 rx_all -= hclge_get_tx_buff_alloced(hdev);
1529
1530 /* When DCB is not supported, rx private
1531 * buffer is not allocated.
1532 */
1533 if (!hnae3_dev_dcb_supported(hdev)) {
1534 if (!hclge_is_rx_buf_ok(hdev, rx_all))
1535 return -ENOMEM;
1536
1537 return 0;
1538 }
1539
1540 /* step 1, try to alloc private buffer for all enabled tc */
1541 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1542 priv = &hdev->priv_buf[i];
1543 if (hdev->hw_tc_map & BIT(i)) {
1544 priv->enable = 1;
1545 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1546 priv->wl.low = hdev->mps;
1547 priv->wl.high = priv->wl.low + hdev->mps;
1548 priv->buf_size = priv->wl.high +
1549 HCLGE_DEFAULT_DV;
1550 } else {
1551 priv->wl.low = 0;
1552 priv->wl.high = 2 * hdev->mps;
1553 priv->buf_size = priv->wl.high;
1554 }
1555 } else {
1556 priv->enable = 0;
1557 priv->wl.low = 0;
1558 priv->wl.high = 0;
1559 priv->buf_size = 0;
1560 }
1561 }
1562
1563 if (hclge_is_rx_buf_ok(hdev, rx_all))
1564 return 0;
1565
1566 /* step 2, try to decrease the buffer size of
 1567 * the pfc disabled TCs' private buffer
1568 */
1569 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570 priv = &hdev->priv_buf[i];
1571
1572 priv->enable = 0;
1573 priv->wl.low = 0;
1574 priv->wl.high = 0;
1575 priv->buf_size = 0;
1576
1577 if (!(hdev->hw_tc_map & BIT(i)))
1578 continue;
1579
1580 priv->enable = 1;
1581
1582 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1583 priv->wl.low = 128;
1584 priv->wl.high = priv->wl.low + hdev->mps;
1585 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1586 } else {
1587 priv->wl.low = 0;
1588 priv->wl.high = hdev->mps;
1589 priv->buf_size = priv->wl.high;
1590 }
1591 }
1592
1593 if (hclge_is_rx_buf_ok(hdev, rx_all))
1594 return 0;
1595
1596 /* step 3, try to reduce the number of pfc disabled TCs,
1597 * which have private buffer
1598 */
 1599 /* get the total number of pfc disabled TCs, which have private buffer */
1600 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev);
1601
 1602 /* let the last one be cleared first */
1603 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1604 priv = &hdev->priv_buf[i];
1605
1606 if (hdev->hw_tc_map & BIT(i) &&
1607 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1608 /* Clear the no pfc TC private buffer */
1609 priv->wl.low = 0;
1610 priv->wl.high = 0;
1611 priv->buf_size = 0;
1612 priv->enable = 0;
1613 no_pfc_priv_num--;
1614 }
1615
1616 if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1617 no_pfc_priv_num == 0)
1618 break;
1619 }
1620
1621 if (hclge_is_rx_buf_ok(hdev, rx_all))
1622 return 0;
1623
1624 /* step 4, try to reduce the number of pfc enabled TCs
1625 * which have private buffer.
1626 */
1627 pfc_priv_num = hclge_get_pfc_priv_num(hdev);
1628
 1629 /* let the last one be cleared first */
1630 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1631 priv = &hdev->priv_buf[i];
1632
1633 if (hdev->hw_tc_map & BIT(i) &&
1634 hdev->tm_info.hw_pfc_map & BIT(i)) {
1635 /* Reduce the number of pfc TC with private buffer */
1636 priv->wl.low = 0;
1637 priv->enable = 0;
1638 priv->wl.high = 0;
1639 priv->buf_size = 0;
1640 pfc_priv_num--;
1641 }
1642
1643 if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1644 pfc_priv_num == 0)
1645 break;
1646 }
1647 if (hclge_is_rx_buf_ok(hdev, rx_all))
1648 return 0;
1649
1650 return -ENOMEM;
1651}
1652
1653static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
1654{
1655 struct hclge_rx_priv_buff *req;
1656 struct hclge_desc desc;
1657 int ret;
1658 int i;
1659
1660 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1661 req = (struct hclge_rx_priv_buff *)desc.data;
1662
1663 /* Alloc private buffer TCs */
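	/* Each buf_num entry carries the TC's private buffer size in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes together with the per-TC enable bit;
	 * shared_buf is encoded the same way for the shared pool.
	 */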
1664 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1665 struct hclge_priv_buf *priv = &hdev->priv_buf[i];
1666
1667 req->buf_num[i] =
1668 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1669 req->buf_num[i] |=
1670 cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
1671 }
1672
1673 req->shared_buf =
1674 cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1675 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1676
1677 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1678 if (ret) {
1679 dev_err(&hdev->pdev->dev,
1680 "rx private buffer alloc cmd failed %d\n", ret);
1681 return ret;
1682 }
1683
1684 return 0;
1685}
1686
1687#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1688
1689static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
1690{
1691 struct hclge_rx_priv_wl_buf *req;
1692 struct hclge_priv_buf *priv;
1693 struct hclge_desc desc[2];
1694 int i, j;
1695 int ret;
1696
1697 for (i = 0; i < 2; i++) {
1698 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1699 false);
1700 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1701
 1702 /* The first descriptor sets the NEXT bit to 1 */
1703 if (i == 0)
1704 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1705 else
1706 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1707
1708 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1709 priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j];
1710 req->tc_wl[j].high =
1711 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1712 req->tc_wl[j].high |=
1713 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1714 HCLGE_RX_PRIV_EN_B);
1715 req->tc_wl[j].low =
1716 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1717 req->tc_wl[j].low |=
1718 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1719 HCLGE_RX_PRIV_EN_B);
1720 }
1721 }
1722
 1723 /* Send 2 descriptors at one time */
1724 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1725 if (ret) {
1726 dev_err(&hdev->pdev->dev,
1727 "rx private waterline config cmd failed %d\n",
1728 ret);
1729 return ret;
1730 }
1731 return 0;
1732}
1733
1734static int hclge_common_thrd_config(struct hclge_dev *hdev)
1735{
1736 struct hclge_shared_buf *s_buf = &hdev->s_buf;
1737 struct hclge_rx_com_thrd *req;
1738 struct hclge_desc desc[2];
1739 struct hclge_tc_thrd *tc;
1740 int i, j;
1741 int ret;
1742
1743 for (i = 0; i < 2; i++) {
1744 hclge_cmd_setup_basic_desc(&desc[i],
1745 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1746 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1747
 1748 /* The first descriptor sets the NEXT bit to 1 */
1749 if (i == 0)
1750 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1751 else
1752 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1753
1754 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1755 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1756
1757 req->com_thrd[j].high =
1758 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1759 req->com_thrd[j].high |=
1760 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1761 HCLGE_RX_PRIV_EN_B);
1762 req->com_thrd[j].low =
1763 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1764 req->com_thrd[j].low |=
1765 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1766 HCLGE_RX_PRIV_EN_B);
1767 }
1768 }
1769
1770 /* Send 2 descriptors at one time */
1771 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1772 if (ret) {
1773 dev_err(&hdev->pdev->dev,
1774 "common threshold config cmd failed %d\n", ret);
1775 return ret;
1776 }
1777 return 0;
1778}
1779
1780static int hclge_common_wl_config(struct hclge_dev *hdev)
1781{
1782 struct hclge_shared_buf *buf = &hdev->s_buf;
1783 struct hclge_rx_com_wl *req;
1784 struct hclge_desc desc;
1785 int ret;
1786
1787 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1788
1789 req = (struct hclge_rx_com_wl *)desc.data;
1790 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1791 req->com_wl.high |=
1792 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1793 HCLGE_RX_PRIV_EN_B);
1794
1795 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1796 req->com_wl.low |=
1797 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1798 HCLGE_RX_PRIV_EN_B);
1799
1800 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1801 if (ret) {
1802 dev_err(&hdev->pdev->dev,
1803 "common waterline config cmd failed %d\n", ret);
1804 return ret;
1805 }
1806
1807 return 0;
1808}
1809
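/* hclge_buffer_alloc: carve the packet buffer up between TX and RX.  TX
 * buffers are sized and committed first, the remainder is split into per-TC
 * RX private buffers plus a shared pool, and the RX waterlines/thresholds are
 * then programmed (the private waterline and common threshold steps apply
 * only when DCB is supported).
 */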
1810int hclge_buffer_alloc(struct hclge_dev *hdev)
1811{
1812 int ret;
1813
1814 hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM,
1815 sizeof(struct hclge_priv_buf),
1816 GFP_KERNEL | __GFP_ZERO);
1817 if (!hdev->priv_buf)
1818 return -ENOMEM;
1819
1820 ret = hclge_tx_buffer_calc(hdev);
1821 if (ret) {
1822 dev_err(&hdev->pdev->dev,
1823 "could not calc tx buffer size for all TCs %d\n", ret);
1824 return ret;
1825 }
1826
1827 ret = hclge_tx_buffer_alloc(hdev);
1828 if (ret) {
1829 dev_err(&hdev->pdev->dev,
1830 "could not alloc tx buffers %d\n", ret);
1831 return ret;
1832 }
1833
 1834 ret = hclge_rx_buffer_calc(hdev);
1835 if (ret) {
1836 dev_err(&hdev->pdev->dev,
1837 "could not calc rx priv buffer size for all TCs %d\n",
1838 ret);
1839 return ret;
1840 }
1841
1842 ret = hclge_rx_priv_buf_alloc(hdev);
1843 if (ret) {
1844 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1845 ret);
1846 return ret;
1847 }
1848
1849 if (hnae3_dev_dcb_supported(hdev)) {
1850 ret = hclge_rx_priv_wl_config(hdev);
1851 if (ret) {
1852 dev_err(&hdev->pdev->dev,
1853 "could not configure rx private waterline %d\n",
1854 ret);
1855 return ret;
1856 }
 1857
1858 ret = hclge_common_thrd_config(hdev);
1859 if (ret) {
1860 dev_err(&hdev->pdev->dev,
1861 "could not configure common threshold %d\n",
1862 ret);
1863 return ret;
1864 }
1865 }
1866
1867 ret = hclge_common_wl_config(hdev);
1868 if (ret) {
1869 dev_err(&hdev->pdev->dev,
1870 "could not configure common waterline %d\n", ret);
1871 return ret;
1872 }
1873
1874 return 0;
1875}
1876
1877static int hclge_init_roce_base_info(struct hclge_vport *vport)
1878{
1879 struct hnae3_handle *roce = &vport->roce;
1880 struct hnae3_handle *nic = &vport->nic;
1881
1882 roce->rinfo.num_vectors = vport->back->num_roce_msix;
1883
1884 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1885 vport->back->num_msi_left == 0)
1886 return -EINVAL;
1887
1888 roce->rinfo.base_vector = vport->back->roce_base_vector;
1889
1890 roce->rinfo.netdev = nic->kinfo.netdev;
1891 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1892
1893 roce->pdev = nic->pdev;
1894 roce->ae_algo = nic->ae_algo;
1895 roce->numa_node_mask = nic->numa_node_mask;
1896
1897 return 0;
1898}
1899
1900static int hclge_init_msix(struct hclge_dev *hdev)
1901{
1902 struct pci_dev *pdev = hdev->pdev;
1903 int ret, i;
1904
1905 hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
1906 sizeof(struct msix_entry),
1907 GFP_KERNEL);
1908 if (!hdev->msix_entries)
1909 return -ENOMEM;
1910
1911 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1912 sizeof(u16), GFP_KERNEL);
1913 if (!hdev->vector_status)
1914 return -ENOMEM;
1915
1916 for (i = 0; i < hdev->num_msi; i++) {
1917 hdev->msix_entries[i].entry = i;
1918 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1919 }
1920
1921 hdev->num_msi_left = hdev->num_msi;
1922 hdev->base_msi_vector = hdev->pdev->irq;
1923 hdev->roce_base_vector = hdev->base_msi_vector +
1924 HCLGE_ROCE_VECTOR_OFFSET;
1925
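 /* Request exactly num_msi vectors; with min == max, a partial MSI-X
  * allocation is treated as a failure rather than falling back.
  */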
1926 ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
1927 hdev->num_msi, hdev->num_msi);
1928 if (ret < 0) {
1929 dev_info(&hdev->pdev->dev,
1930 "MSI-X vector alloc failed: %d\n", ret);
1931 return ret;
1932 }
1933
1934 return 0;
1935}
1936
1937static int hclge_init_msi(struct hclge_dev *hdev)
1938{
1939 struct pci_dev *pdev = hdev->pdev;
1940 int vectors;
1941 int i;
1942
1943 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1944 sizeof(u16), GFP_KERNEL);
1945 if (!hdev->vector_status)
1946 return -ENOMEM;
1947
1948 for (i = 0; i < hdev->num_msi; i++)
1949 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1950
1951 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
1952 if (vectors < 0) {
1953 dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
1954 return -EINVAL;
1955 }
1956 hdev->num_msi = vectors;
1957 hdev->num_msi_left = vectors;
1958 hdev->base_msi_vector = pdev->irq;
1959 hdev->roce_base_vector = hdev->base_msi_vector +
1960 HCLGE_ROCE_VECTOR_OFFSET;
1961
1962 return 0;
1963}
1964
1965static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
1966{
1967 struct hclge_mac *mac = &hdev->hw.mac;
1968
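 /* Only 10M/100M links honour the requested duplex; faster speeds are
  * forced to full duplex.
  */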
1969 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
1970 mac->duplex = (u8)duplex;
1971 else
1972 mac->duplex = HCLGE_MAC_FULL;
1973
1974 mac->speed = speed;
1975}
1976
1977int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1978{
1979 struct hclge_config_mac_speed_dup *req;
1980 struct hclge_desc desc;
1981 int ret;
1982
1983 req = (struct hclge_config_mac_speed_dup *)desc.data;
1984
1985 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1986
1987 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1988
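 /* Map the speed to the firmware encoding used by the speed/duplex
  * command: 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7.
  */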
1989 switch (speed) {
1990 case HCLGE_MAC_SPEED_10M:
1991 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1992 HCLGE_CFG_SPEED_S, 6);
1993 break;
1994 case HCLGE_MAC_SPEED_100M:
1995 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1996 HCLGE_CFG_SPEED_S, 7);
1997 break;
1998 case HCLGE_MAC_SPEED_1G:
1999 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2000 HCLGE_CFG_SPEED_S, 0);
2001 break;
2002 case HCLGE_MAC_SPEED_10G:
2003 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2004 HCLGE_CFG_SPEED_S, 1);
2005 break;
2006 case HCLGE_MAC_SPEED_25G:
2007 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2008 HCLGE_CFG_SPEED_S, 2);
2009 break;
2010 case HCLGE_MAC_SPEED_40G:
2011 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2012 HCLGE_CFG_SPEED_S, 3);
2013 break;
2014 case HCLGE_MAC_SPEED_50G:
2015 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2016 HCLGE_CFG_SPEED_S, 4);
2017 break;
2018 case HCLGE_MAC_SPEED_100G:
2019 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2020 HCLGE_CFG_SPEED_S, 5);
2021 break;
2022 default:
 2023 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2024 return -EINVAL;
2025 }
2026
2027 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2028 1);
2029
2030 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2031 if (ret) {
2032 dev_err(&hdev->pdev->dev,
2033 "mac speed/duplex config cmd failed %d.\n", ret);
2034 return ret;
2035 }
2036
2037 hclge_check_speed_dup(hdev, duplex, speed);
2038
2039 return 0;
2040}
2041
2042static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2043 u8 duplex)
2044{
2045 struct hclge_vport *vport = hclge_get_vport(handle);
2046 struct hclge_dev *hdev = vport->back;
2047
2048 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2049}
2050
2051static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2052 u8 *duplex)
2053{
2054 struct hclge_query_an_speed_dup *req;
2055 struct hclge_desc desc;
2056 int speed_tmp;
2057 int ret;
2058
2059 req = (struct hclge_query_an_speed_dup *)desc.data;
2060
2061 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2062 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2063 if (ret) {
2064 dev_err(&hdev->pdev->dev,
2065 "mac speed/autoneg/duplex query cmd failed %d\n",
2066 ret);
2067 return ret;
2068 }
2069
2070 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2071 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2072 HCLGE_QUERY_SPEED_S);
2073
2074 ret = hclge_parse_speed(speed_tmp, speed);
2075 if (ret) {
2076 dev_err(&hdev->pdev->dev,
2077 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2078 return -EIO;
2079 }
2080
2081 return 0;
2082}
2083
2084static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2085{
2086 struct hclge_mac *mac = &hdev->hw.mac;
2087 struct hclge_query_an_speed_dup *req;
2088 struct hclge_desc desc;
2089 int ret;
2090
2091 req = (struct hclge_query_an_speed_dup *)desc.data;
2092
2093 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2094 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2095 if (ret) {
2096 dev_err(&hdev->pdev->dev,
2097 "autoneg result query cmd failed %d.\n", ret);
2098 return ret;
2099 }
2100
2101 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2102
2103 return 0;
2104}
2105
2106static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2107{
2108 struct hclge_config_auto_neg *req;
2109 struct hclge_desc desc;
2110 int ret;
2111
2112 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2113
2114 req = (struct hclge_config_auto_neg *)desc.data;
2115 hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2116
2117 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2118 if (ret) {
2119 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2120 ret);
2121 return ret;
2122 }
2123
2124 return 0;
2125}
2126
2127static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2128{
2129 struct hclge_vport *vport = hclge_get_vport(handle);
2130 struct hclge_dev *hdev = vport->back;
2131
2132 return hclge_set_autoneg_en(hdev, enable);
2133}
2134
2135static int hclge_get_autoneg(struct hnae3_handle *handle)
2136{
2137 struct hclge_vport *vport = hclge_get_vport(handle);
2138 struct hclge_dev *hdev = vport->back;
2139
2140 hclge_query_autoneg_result(hdev);
2141
2142 return hdev->hw.mac.autoneg;
2143}
2144
2145static int hclge_mac_init(struct hclge_dev *hdev)
2146{
2147 struct hclge_mac *mac = &hdev->hw.mac;
2148 int ret;
2149
2150 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2151 if (ret) {
2152 dev_err(&hdev->pdev->dev,
2153 "Config mac speed dup fail ret=%d\n", ret);
2154 return ret;
2155 }
2156
2157 mac->link = 0;
2158
2159 ret = hclge_mac_mdio_config(hdev);
2160 if (ret) {
2161 dev_warn(&hdev->pdev->dev,
2162 "mdio config fail ret=%d\n", ret);
2163 return ret;
2164 }
2165
2166 /* Initialize the MTA table work mode */
2167 hdev->accept_mta_mc = true;
2168 hdev->enable_mta = true;
2169 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2170
2171 ret = hclge_set_mta_filter_mode(hdev,
2172 hdev->mta_mac_sel_type,
2173 hdev->enable_mta);
2174 if (ret) {
2175 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2176 ret);
2177 return ret;
2178 }
2179
2180 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2181}
2182
2183static void hclge_task_schedule(struct hclge_dev *hdev)
2184{
2185 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2186 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2187 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2188 (void)schedule_work(&hdev->service_task);
2189}
2190
2191static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2192{
2193 struct hclge_link_status *req;
2194 struct hclge_desc desc;
2195 int link_status;
2196 int ret;
2197
2198 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2199 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2200 if (ret) {
2201 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2202 ret);
2203 return ret;
2204 }
2205
2206 req = (struct hclge_link_status *)desc.data;
2207 link_status = req->status & HCLGE_LINK_STATUS;
2208
2209 return !!link_status;
2210}
2211
2212static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2213{
2214 int mac_state;
2215 int link_stat;
2216
2217 mac_state = hclge_get_mac_link_status(hdev);
2218
2219 if (hdev->hw.mac.phydev) {
2220 if (!genphy_read_status(hdev->hw.mac.phydev))
2221 link_stat = mac_state &
2222 hdev->hw.mac.phydev->link;
2223 else
2224 link_stat = 0;
2225
2226 } else {
2227 link_stat = mac_state;
2228 }
2229
2230 return !!link_stat;
2231}
2232
2233static void hclge_update_link_status(struct hclge_dev *hdev)
2234{
2235 struct hnae3_client *client = hdev->nic_client;
2236 struct hnae3_handle *handle;
2237 int state;
2238 int i;
2239
2240 if (!client)
2241 return;
2242 state = hclge_get_mac_phy_link(hdev);
2243 if (state != hdev->hw.mac.link) {
2244 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2245 handle = &hdev->vport[i].nic;
2246 client->ops->link_status_change(handle, state);
2247 }
2248 hdev->hw.mac.link = state;
2249 }
2250}
2251
2252static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2253{
2254 struct hclge_mac mac = hdev->hw.mac;
2255 u8 duplex;
2256 int speed;
2257 int ret;
2258
 2259 /* Get the speed and duplex as the autoneg result from the MAC command
 2260 * when no PHY is present.
 2261 */
2262 if (mac.phydev)
2263 return 0;
2264
 2265 /* Update mac->autoneg. */
2266 ret = hclge_query_autoneg_result(hdev);
2267 if (ret) {
2268 dev_err(&hdev->pdev->dev,
2269 "autoneg result query failed %d\n", ret);
2270 return ret;
2271 }
2272
2273 if (!mac.autoneg)
2274 return 0;
2275
2276 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2277 if (ret) {
2278 dev_err(&hdev->pdev->dev,
2279 "mac autoneg/speed/duplex query failed %d\n", ret);
2280 return ret;
2281 }
2282
2283 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2284 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2285 if (ret) {
2286 dev_err(&hdev->pdev->dev,
2287 "mac speed/duplex config failed %d\n", ret);
2288 return ret;
2289 }
2290 }
2291
2292 return 0;
2293}
2294
2295static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2296{
2297 struct hclge_vport *vport = hclge_get_vport(handle);
2298 struct hclge_dev *hdev = vport->back;
2299
2300 return hclge_update_speed_duplex(hdev);
2301}
2302
2303static int hclge_get_status(struct hnae3_handle *handle)
2304{
2305 struct hclge_vport *vport = hclge_get_vport(handle);
2306 struct hclge_dev *hdev = vport->back;
2307
2308 hclge_update_link_status(hdev);
2309
2310 return hdev->hw.mac.link;
2311}
2312
2313static void hclge_service_timer(unsigned long data)
2314{
2315 struct hclge_dev *hdev = (struct hclge_dev *)data;
2316 (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2317
2318 hclge_task_schedule(hdev);
2319}
2320
2321static void hclge_service_complete(struct hclge_dev *hdev)
2322{
2323 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2324
2325 /* Flush memory before next watchdog */
2326 smp_mb__before_atomic();
2327 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2328}
2329
2330static void hclge_service_task(struct work_struct *work)
2331{
2332 struct hclge_dev *hdev =
2333 container_of(work, struct hclge_dev, service_task);
2334
2335 hclge_update_speed_duplex(hdev);
2336 hclge_update_link_status(hdev);
2337 hclge_update_stats_for_all(hdev);
2338 hclge_service_complete(hdev);
2339}
2340
2341static void hclge_disable_sriov(struct hclge_dev *hdev)
2342{
2343 /* If our VFs are assigned we cannot shut down SR-IOV
2344 * without causing issues, so just leave the hardware
2345 * available but disabled
2346 */
2347 if (pci_vfs_assigned(hdev->pdev)) {
2348 dev_warn(&hdev->pdev->dev,
2349 "disabling driver while VFs are assigned\n");
2350 return;
2351 }
 2352
 2353 pci_disable_sriov(hdev->pdev);
2354}
2355
2356struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2357{
2358 /* VF handle has no client */
2359 if (!handle->client)
2360 return container_of(handle, struct hclge_vport, nic);
2361 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2362 return container_of(handle, struct hclge_vport, roce);
2363 else
2364 return container_of(handle, struct hclge_vport, nic);
2365}
2366
2367static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2368 struct hnae3_vector_info *vector_info)
2369{
2370 struct hclge_vport *vport = hclge_get_vport(handle);
2371 struct hnae3_vector_info *vector = vector_info;
2372 struct hclge_dev *hdev = vport->back;
2373 int alloc = 0;
2374 int i, j;
2375
2376 vector_num = min(hdev->num_msi_left, vector_num);
2377
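 /* Scan for free vectors starting at index 1 (index 0 is not handed
  * out here). Each hit records the owning vport and reports the
  * vector's doorbell address, derived from the per-vector and per-VF
  * register strides.
  */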
2378 for (j = 0; j < vector_num; j++) {
2379 for (i = 1; i < hdev->num_msi; i++) {
2380 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2381 vector->vector = pci_irq_vector(hdev->pdev, i);
2382 vector->io_addr = hdev->hw.io_base +
2383 HCLGE_VECTOR_REG_BASE +
2384 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2385 vport->vport_id *
2386 HCLGE_VECTOR_VF_OFFSET;
2387 hdev->vector_status[i] = vport->vport_id;
2388
2389 vector++;
2390 alloc++;
2391
2392 break;
2393 }
2394 }
2395 }
2396 hdev->num_msi_left -= alloc;
2397 hdev->num_msi_used += alloc;
2398
2399 return alloc;
2400}
2401
2402static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2403{
2404 int i;
2405
2406 for (i = 0; i < hdev->num_msi; i++) {
2407 if (hdev->msix_entries) {
2408 if (vector == hdev->msix_entries[i].vector)
2409 return i;
2410 } else {
2411 if (vector == (hdev->base_msi_vector + i))
2412 return i;
2413 }
2414 }
2415 return -EINVAL;
2416}
2417
2418static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2419{
2420 return HCLGE_RSS_KEY_SIZE;
2421}
2422
2423static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2424{
2425 return HCLGE_RSS_IND_TBL_SIZE;
2426}
2427
2428static int hclge_get_rss_algo(struct hclge_dev *hdev)
2429{
2430 struct hclge_rss_config *req;
2431 struct hclge_desc desc;
2432 int rss_hash_algo;
2433 int ret;
2434
2435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2436
2437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438 if (ret) {
2439 dev_err(&hdev->pdev->dev,
2440 "Get link status error, status =%d\n", ret);
2441 return ret;
2442 }
2443
2444 req = (struct hclge_rss_config *)desc.data;
2445 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2446
2447 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2448 return ETH_RSS_HASH_TOP;
2449
2450 return -EINVAL;
2451}
2452
2453static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2454 const u8 hfunc, const u8 *key)
2455{
2456 struct hclge_rss_config *req;
2457 struct hclge_desc desc;
2458 int key_offset;
2459 int key_size;
2460 int ret;
2461
2462 req = (struct hclge_rss_config *)desc.data;
2463
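 /* The RSS key is written HCLGE_RSS_HASH_KEY_NUM bytes per command,
  * with the last chunk carrying the remainder. Assuming the usual
  * sizes (40-byte key, 16-byte chunks), the split is 16 + 16 + 8.
  */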
2464 for (key_offset = 0; key_offset < 3; key_offset++) {
2465 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2466 false);
2467
2468 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2469 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2470
2471 if (key_offset == 2)
2472 key_size =
2473 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2474 else
2475 key_size = HCLGE_RSS_HASH_KEY_NUM;
2476
2477 memcpy(req->hash_key,
2478 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2479
2480 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2481 if (ret) {
2482 dev_err(&hdev->pdev->dev,
2483 "Configure RSS config fail, status = %d\n",
2484 ret);
2485 return ret;
2486 }
2487 }
2488 return 0;
2489}
2490
2491static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2492{
2493 struct hclge_rss_indirection_table *req;
2494 struct hclge_desc desc;
2495 int i, j;
2496 int ret;
2497
2498 req = (struct hclge_rss_indirection_table *)desc.data;
2499
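 /* The indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE entries
  * per command, HCLGE_RSS_CFG_TBL_NUM commands in total; each
  * descriptor carries its start index and the queue id of every slot.
  */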
2500 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2501 hclge_cmd_setup_basic_desc
2502 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2503
2504 req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE;
2505 req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK;
2506
2507 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2508 req->rss_result[j] =
2509 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2510
2511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2512 if (ret) {
2513 dev_err(&hdev->pdev->dev,
2514 "Configure rss indir table fail,status = %d\n",
2515 ret);
2516 return ret;
2517 }
2518 }
2519 return 0;
2520}
2521
2522static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2523 u16 *tc_size, u16 *tc_offset)
2524{
2525 struct hclge_rss_tc_mode *req;
2526 struct hclge_desc desc;
2527 int ret;
2528 int i;
2529
2530 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2531 req = (struct hclge_rss_tc_mode *)desc.data;
2532
2533 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2534 hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B,
2535 (tc_valid[i] & 0x1));
2536 hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M,
2537 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2538 hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M,
2539 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2540 }
2541
2542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2543 if (ret) {
2544 dev_err(&hdev->pdev->dev,
2545 "Configure rss tc mode fail, status = %d\n", ret);
2546 return ret;
2547 }
2548
2549 return 0;
2550}
2551
2552static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2553{
2554#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf
2555#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f
2556 struct hclge_rss_input_tuple *req;
2557 struct hclge_desc desc;
2558 int ret;
2559
2560 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2561
2562 req = (struct hclge_rss_input_tuple *)desc.data;
2563 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2564 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2565 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2566 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2567 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2568 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2569 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2570 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572 if (ret) {
2573 dev_err(&hdev->pdev->dev,
2574 "Configure rss input fail, status = %d\n", ret);
2575 return ret;
2576 }
2577
2578 return 0;
2579}
2580
2581static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2582 u8 *key, u8 *hfunc)
2583{
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 int i;
2587
2588 /* Get hash algorithm */
2589 if (hfunc)
2590 *hfunc = hclge_get_rss_algo(hdev);
2591
2592 /* Get the RSS Key required by the user */
2593 if (key)
2594 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2595
2596 /* Get indirect table */
2597 if (indir)
2598 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2599 indir[i] = vport->rss_indirection_tbl[i];
2600
2601 return 0;
2602}
2603
2604static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2605 const u8 *key, const u8 hfunc)
2606{
2607 struct hclge_vport *vport = hclge_get_vport(handle);
2608 struct hclge_dev *hdev = vport->back;
2609 u8 hash_algo;
2610 int ret, i;
2611
 2612 /* Set the RSS hash key if specified by the user */
2613 if (key) {
 2614 /* Update the shadow RSS key with the user specified key */
2615 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2616
2617 if (hfunc == ETH_RSS_HASH_TOP ||
2618 hfunc == ETH_RSS_HASH_NO_CHANGE)
2619 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2620 else
2621 return -EINVAL;
2622 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2623 if (ret)
2624 return ret;
2625 }
2626
2627 /* Update the shadow RSS table with user specified qids */
2628 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2629 vport->rss_indirection_tbl[i] = indir[i];
2630
2631 /* Update the hardware */
2632 ret = hclge_set_rss_indir_table(hdev, indir);
2633 return ret;
2634}
2635
2636static int hclge_get_tc_size(struct hnae3_handle *handle)
2637{
2638 struct hclge_vport *vport = hclge_get_vport(handle);
2639 struct hclge_dev *hdev = vport->back;
2640
2641 return hdev->rss_size_max;
2642}
2643
2644static int hclge_rss_init_hw(struct hclge_dev *hdev)
2645{
2646 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2647 struct hclge_vport *vport = hdev->vport;
2648 u16 tc_offset[HCLGE_MAX_TC_NUM];
2649 u8 rss_key[HCLGE_RSS_KEY_SIZE];
2650 u16 tc_valid[HCLGE_MAX_TC_NUM];
2651 u16 tc_size[HCLGE_MAX_TC_NUM];
2652 u32 *rss_indir = NULL;
 2653 u16 rss_size = 0, roundup_size;
2654 const u8 *key;
2655 int i, ret, j;
2656
2657 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
2658 if (!rss_indir)
2659 return -ENOMEM;
2660
2661 /* Get default RSS key */
2662 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
2663
2664 /* Initialize RSS indirect table for each vport */
2665 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
2666 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
2667 vport[j].rss_indirection_tbl[i] =
2668 i % vport[j].alloc_rss_size;
2669
2670 /* vport 0 is for PF */
2671 if (j != 0)
2672 continue;
2673
2674 rss_size = vport[j].alloc_rss_size;
2675 rss_indir[i] = vport[j].rss_indirection_tbl[i];
2676 }
2677 }
2678 ret = hclge_set_rss_indir_table(hdev, rss_indir);
2679 if (ret)
2680 goto err;
2681
2682 key = rss_key;
2683 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
2684 if (ret)
2685 goto err;
2686
2687 ret = hclge_set_rss_input_tuple(hdev);
2688 if (ret)
2689 goto err;
2690
 2691 /* Each TC has the same queue size, and the tc_size written to hardware
 2692 * is the log2 of rss_size rounded up to a power of two; the actual queue
 2693 * size is limited by the indirection table.
 2694 */
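 /* For example, rss_size = 24 gives roundup_pow_of_two(24) = 32, so
  * tc_size = ilog2(32) = 5 and tc_offset[i] = 24 * i for each
  * enabled TC.
  */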
2695 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
2696 dev_err(&hdev->pdev->dev,
2697 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
2698 rss_size);
 2699 ret = -EINVAL;
 goto err;
2700 }
2701
2702 roundup_size = roundup_pow_of_two(rss_size);
2703 roundup_size = ilog2(roundup_size);
2704
 2705 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 2706 tc_valid[i] = 0;
 2707
2708 if (!(hdev->hw_tc_map & BIT(i)))
2709 continue;
2710
2711 tc_valid[i] = 1;
2712 tc_size[i] = roundup_size;
2713 tc_offset[i] = rss_size * i;
 2714 }
 2715
2716 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
2717
2718err:
2719 kfree(rss_indir);
2720
2721 return ret;
2722}
2723
2724int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
2725 struct hnae3_ring_chain_node *ring_chain)
2726{
2727 struct hclge_dev *hdev = vport->back;
2728 struct hclge_ctrl_vector_chain *req;
2729 struct hnae3_ring_chain_node *node;
2730 struct hclge_desc desc;
2731 int ret;
2732 int i;
2733
2734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
2735
2736 req = (struct hclge_ctrl_vector_chain *)desc.data;
2737 req->int_vector_id = vector_id;
2738
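 /* Walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
  * ring entries into each command; a full descriptor is sent and a
  * fresh one started for the remaining rings.
  */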
2739 i = 0;
2740 for (node = ring_chain; node; node = node->next) {
2741 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2742 HCLGE_INT_TYPE_S,
2743 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2744 hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2745 HCLGE_TQP_ID_S, node->tqp_index);
2746 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
2747 HCLGE_INT_GL_IDX_S,
2748 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 2749 req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
 2750 req->vfid = vport->vport_id;
2751
2752 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2753 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2754
2755 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2756 if (ret) {
2757 dev_err(&hdev->pdev->dev,
2758 "Map TQP fail, status is %d.\n",
2759 ret);
2760 return ret;
2761 }
2762 i = 0;
2763
2764 hclge_cmd_setup_basic_desc(&desc,
2765 HCLGE_OPC_ADD_RING_TO_VECTOR,
2766 false);
2767 req->int_vector_id = vector_id;
2768 }
2769 }
2770
2771 if (i > 0) {
2772 req->int_cause_num = i;
2773
2774 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2775 if (ret) {
2776 dev_err(&hdev->pdev->dev,
2777 "Map TQP fail, status is %d.\n", ret);
2778 return ret;
2779 }
2780 }
2781
2782 return 0;
2783}
2784
2785int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle,
2786 int vector,
2787 struct hnae3_ring_chain_node *ring_chain)
2788{
2789 struct hclge_vport *vport = hclge_get_vport(handle);
2790 struct hclge_dev *hdev = vport->back;
2791 int vector_id;
2792
2793 vector_id = hclge_get_vector_index(hdev, vector);
2794 if (vector_id < 0) {
2795 dev_err(&hdev->pdev->dev,
2796 "Get vector index fail. ret =%d\n", vector_id);
2797 return vector_id;
2798 }
2799
2800 return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
2801}
2802
2803static int hclge_unmap_ring_from_vector(
2804 struct hnae3_handle *handle, int vector,
2805 struct hnae3_ring_chain_node *ring_chain)
2806{
2807 struct hclge_vport *vport = hclge_get_vport(handle);
2808 struct hclge_dev *hdev = vport->back;
2809 struct hclge_ctrl_vector_chain *req;
2810 struct hnae3_ring_chain_node *node;
2811 struct hclge_desc desc;
2812 int i, vector_id;
2813 int ret;
2814
2815 vector_id = hclge_get_vector_index(hdev, vector);
2816 if (vector_id < 0) {
2817 dev_err(&handle->pdev->dev,
2818 "Get vector index fail. ret =%d\n", vector_id);
2819 return vector_id;
2820 }
2821
2822 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
2823
2824 req = (struct hclge_ctrl_vector_chain *)desc.data;
2825 req->int_vector_id = vector_id;
2826
2827 i = 0;
2828 for (node = ring_chain; node; node = node->next) {
2829 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2830 HCLGE_INT_TYPE_S,
2831 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2832 hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2833 HCLGE_TQP_ID_S, node->tqp_index);
2834 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
2835 HCLGE_INT_GL_IDX_S,
2836 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2837
2838 req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
 2839 req->vfid = vport->vport_id;
2840
2841 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2842 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2843
2844 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2845 if (ret) {
2846 dev_err(&hdev->pdev->dev,
2847 "Unmap TQP fail, status is %d.\n",
2848 ret);
2849 return ret;
2850 }
2851 i = 0;
2852 hclge_cmd_setup_basic_desc(&desc,
 2853 HCLGE_OPC_DEL_RING_TO_VECTOR,
2854 false);
2855 req->int_vector_id = vector_id;
2856 }
2857 }
2858
2859 if (i > 0) {
2860 req->int_cause_num = i;
2861
2862 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2863 if (ret) {
2864 dev_err(&hdev->pdev->dev,
2865 "Unmap TQP fail, status is %d.\n", ret);
2866 return ret;
2867 }
2868 }
2869
2870 return 0;
2871}
2872
2873int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
2874 struct hclge_promisc_param *param)
2875{
2876 struct hclge_promisc_cfg *req;
2877 struct hclge_desc desc;
2878 int ret;
2879
2880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
2881
2882 req = (struct hclge_promisc_cfg *)desc.data;
2883 req->vf_id = param->vf_id;
2884 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
2885
2886 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2887 if (ret) {
2888 dev_err(&hdev->pdev->dev,
2889 "Set promisc mode fail, status is %d.\n", ret);
2890 return ret;
2891 }
2892 return 0;
2893}
2894
2895void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
2896 bool en_mc, bool en_bc, int vport_id)
2897{
2898 if (!param)
2899 return;
2900
2901 memset(param, 0, sizeof(struct hclge_promisc_param));
2902 if (en_uc)
2903 param->enable = HCLGE_PROMISC_EN_UC;
2904 if (en_mc)
2905 param->enable |= HCLGE_PROMISC_EN_MC;
2906 if (en_bc)
2907 param->enable |= HCLGE_PROMISC_EN_BC;
2908 param->vf_id = vport_id;
2909}
2910
2911static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
2912{
2913 struct hclge_vport *vport = hclge_get_vport(handle);
2914 struct hclge_dev *hdev = vport->back;
2915 struct hclge_promisc_param param;
2916
2917 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
2918 hclge_cmd_set_promisc_mode(hdev, &param);
2919}
2920
2921static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
2922{
2923 struct hclge_desc desc;
2924 struct hclge_config_mac_mode *req =
2925 (struct hclge_config_mac_mode *)desc.data;
2926 int ret;
2927
2928 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
2929 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable);
2930 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable);
2931 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable);
2932 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable);
2933 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0);
2934 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0);
2935 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0);
2936 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0);
2937 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable);
2938 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable);
2939 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2940 HCLGE_MAC_RX_FCS_STRIP_B, enable);
2941 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2942 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
2943 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2944 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
2945 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2946 HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
2947
2948 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2949 if (ret)
2950 dev_err(&hdev->pdev->dev,
2951 "mac enable fail, ret =%d.\n", ret);
2952}
2953
2954static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
2955 int stream_id, bool enable)
2956{
2957 struct hclge_desc desc;
2958 struct hclge_cfg_com_tqp_queue *req =
2959 (struct hclge_cfg_com_tqp_queue *)desc.data;
2960 int ret;
2961
2962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
2963 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
2964 req->stream_id = cpu_to_le16(stream_id);
2965 req->enable |= enable << HCLGE_TQP_ENABLE_B;
2966
2967 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2968 if (ret)
2969 dev_err(&hdev->pdev->dev,
2970 "Tqp enable fail, status =%d.\n", ret);
2971 return ret;
2972}
2973
2974static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
2975{
2976 struct hclge_vport *vport = hclge_get_vport(handle);
2977 struct hnae3_queue *queue;
2978 struct hclge_tqp *tqp;
2979 int i;
2980
2981 for (i = 0; i < vport->alloc_tqps; i++) {
2982 queue = handle->kinfo.tqp[i];
2983 tqp = container_of(queue, struct hclge_tqp, q);
2984 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
2985 }
2986}
2987
2988static int hclge_ae_start(struct hnae3_handle *handle)
2989{
2990 struct hclge_vport *vport = hclge_get_vport(handle);
2991 struct hclge_dev *hdev = vport->back;
2992 int i, queue_id, ret;
2993
2994 for (i = 0; i < vport->alloc_tqps; i++) {
2995 /* todo clear interrupt */
2996 /* ring enable */
2997 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2998 if (queue_id < 0) {
2999 dev_warn(&hdev->pdev->dev,
3000 "Get invalid queue id, ignore it\n");
3001 continue;
3002 }
3003
3004 hclge_tqp_enable(hdev, queue_id, 0, true);
3005 }
3006 /* mac enable */
3007 hclge_cfg_mac_mode(hdev, true);
3008 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3009 (void)mod_timer(&hdev->service_timer, jiffies + HZ);
3010
3011 ret = hclge_mac_start_phy(hdev);
3012 if (ret)
3013 return ret;
3014
3015 /* reset tqp stats */
3016 hclge_reset_tqp_stats(handle);
3017
3018 return 0;
3019}
3020
3021static void hclge_ae_stop(struct hnae3_handle *handle)
3022{
3023 struct hclge_vport *vport = hclge_get_vport(handle);
3024 struct hclge_dev *hdev = vport->back;
3025 int i, queue_id;
3026
3027 for (i = 0; i < vport->alloc_tqps; i++) {
3028 /* Ring disable */
3029 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3030 if (queue_id < 0) {
3031 dev_warn(&hdev->pdev->dev,
3032 "Get invalid queue id, ignore it\n");
3033 continue;
3034 }
3035
3036 hclge_tqp_enable(hdev, queue_id, 0, false);
3037 }
3038 /* Mac disable */
3039 hclge_cfg_mac_mode(hdev, false);
3040
3041 hclge_mac_stop_phy(hdev);
3042
3043 /* reset tqp stats */
3044 hclge_reset_tqp_stats(handle);
3045}
3046
3047static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3048 u16 cmdq_resp, u8 resp_code,
3049 enum hclge_mac_vlan_tbl_opcode op)
3050{
3051 struct hclge_dev *hdev = vport->back;
3052 int return_status = -EIO;
3053
3054 if (cmdq_resp) {
3055 dev_err(&hdev->pdev->dev,
3056 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
3057 cmdq_resp);
3058 return -EIO;
3059 }
3060
3061 if (op == HCLGE_MAC_VLAN_ADD) {
3062 if ((!resp_code) || (resp_code == 1)) {
3063 return_status = 0;
3064 } else if (resp_code == 2) {
3065 return_status = -EIO;
3066 dev_err(&hdev->pdev->dev,
3067 "add mac addr failed for uc_overflow.\n");
3068 } else if (resp_code == 3) {
3069 return_status = -EIO;
3070 dev_err(&hdev->pdev->dev,
3071 "add mac addr failed for mc_overflow.\n");
3072 } else {
3073 dev_err(&hdev->pdev->dev,
3074 "add mac addr failed for undefined, code=%d.\n",
3075 resp_code);
3076 }
3077 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3078 if (!resp_code) {
3079 return_status = 0;
3080 } else if (resp_code == 1) {
3081 return_status = -EIO;
3082 dev_dbg(&hdev->pdev->dev,
3083 "remove mac addr failed for miss.\n");
3084 } else {
3085 dev_err(&hdev->pdev->dev,
3086 "remove mac addr failed for undefined, code=%d.\n",
3087 resp_code);
3088 }
3089 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3090 if (!resp_code) {
3091 return_status = 0;
3092 } else if (resp_code == 1) {
3093 return_status = -EIO;
3094 dev_dbg(&hdev->pdev->dev,
3095 "lookup mac addr failed for miss.\n");
3096 } else {
3097 dev_err(&hdev->pdev->dev,
3098 "lookup mac addr failed for undefined, code=%d.\n",
3099 resp_code);
3100 }
3101 } else {
3102 return_status = -EIO;
3103 dev_err(&hdev->pdev->dev,
3104 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3105 op);
3106 }
3107
3108 return return_status;
3109}
3110
3111static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3112{
3113 int word_num;
3114 int bit_num;
3115
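 /* The 256 possible function ids are spread over two descriptors:
  * vfid 0-191 map into desc[1] and vfid 192-255 into desc[2], one bit
  * per function. For example, vfid 200 lands in desc[2], word 0, bit 8.
  */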
3116 if (vfid > 255 || vfid < 0)
3117 return -EIO;
3118
3119 if (vfid >= 0 && vfid <= 191) {
3120 word_num = vfid / 32;
3121 bit_num = vfid % 32;
3122 if (clr)
3123 desc[1].data[word_num] &= ~(1 << bit_num);
3124 else
3125 desc[1].data[word_num] |= (1 << bit_num);
3126 } else {
3127 word_num = (vfid - 192) / 32;
3128 bit_num = vfid % 32;
3129 if (clr)
3130 desc[2].data[word_num] &= ~(1 << bit_num);
3131 else
3132 desc[2].data[word_num] |= (1 << bit_num);
3133 }
3134
3135 return 0;
3136}
3137
3138static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3139{
3140#define HCLGE_DESC_NUMBER 3
3141#define HCLGE_FUNC_NUMBER_PER_DESC 6
3142 int i, j;
3143
3144 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3145 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3146 if (desc[i].data[j])
3147 return false;
3148
3149 return true;
3150}
3151
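 /* Pack the six MAC address bytes into the table entry: bytes 0-3 form
  * the 32-bit word and bytes 4-5 the 16-bit word. For example,
  * 00:11:22:33:44:55 becomes mac_addr_hi32 = 0x33221100 and
  * mac_addr_lo16 = 0x5544.
  */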
3152static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
3153 const u8 *addr)
3154{
3155 const unsigned char *mac_addr = addr;
3156 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3157 (mac_addr[0]) | (mac_addr[1] << 8);
3158 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3159
3160 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3161 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3162}
3163
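 /* Derive the 12-bit MTA index from the top bits of the destination
  * MAC. The window is selected by mta_mac_sel_type; with
  * HCLGE_MAC_ADDR_47_36 (assumed to be the zero value of the enum)
  * the index is bits 47..36 of the address.
  */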
3164u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3165 const u8 *addr)
3166{
3167 u16 high_val = addr[1] | (addr[0] << 8);
3168 struct hclge_dev *hdev = vport->back;
3169 u32 rsh = 4 - hdev->mta_mac_sel_type;
3170 u16 ret_val = (high_val >> rsh) & 0xfff;
3171
3172 return ret_val;
3173}
3174
3175static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3176 enum hclge_mta_dmac_sel_type mta_mac_sel,
3177 bool enable)
3178{
3179 struct hclge_mta_filter_mode *req;
3180 struct hclge_desc desc;
3181 int ret;
3182
3183 req = (struct hclge_mta_filter_mode *)desc.data;
3184 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3185
3186 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3187 enable);
3188 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3189 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3190
3191 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3192 if (ret) {
3193 dev_err(&hdev->pdev->dev,
3194 "Config mat filter mode failed for cmd_send, ret =%d.\n",
3195 ret);
3196 return ret;
3197 }
3198
3199 return 0;
3200}
3201
3202int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3203 u8 func_id,
3204 bool enable)
3205{
3206 struct hclge_cfg_func_mta_filter *req;
3207 struct hclge_desc desc;
3208 int ret;
3209
3210 req = (struct hclge_cfg_func_mta_filter *)desc.data;
3211 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3212
3213 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3214 enable);
3215 req->function_id = func_id;
3216
3217 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3218 if (ret) {
3219 dev_err(&hdev->pdev->dev,
3220 "Config func_id enable failed for cmd_send, ret =%d.\n",
3221 ret);
3222 return ret;
3223 }
3224
3225 return 0;
3226}
3227
3228static int hclge_set_mta_table_item(struct hclge_vport *vport,
3229 u16 idx,
3230 bool enable)
3231{
3232 struct hclge_dev *hdev = vport->back;
3233 struct hclge_cfg_func_mta_item *req;
3234 struct hclge_desc desc;
3235 int ret;
3236
3237 req = (struct hclge_cfg_func_mta_item *)desc.data;
3238 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3239 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3240
3241 hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3242 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3243 req->item_idx = cpu_to_le16(req->item_idx);
3244
3245 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3246 if (ret) {
3247 dev_err(&hdev->pdev->dev,
3248 "Config mta table item failed for cmd_send, ret =%d.\n",
3249 ret);
3250 return ret;
3251 }
3252
3253 return 0;
3254}
3255
3256static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3257 struct hclge_mac_vlan_tbl_entry *req)
3258{
3259 struct hclge_dev *hdev = vport->back;
3260 struct hclge_desc desc;
3261 u8 resp_code;
3262 int ret;
3263
3264 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3265
3266 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3267
3268 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3269 if (ret) {
3270 dev_err(&hdev->pdev->dev,
3271 "del mac addr failed for cmd_send, ret =%d.\n",
3272 ret);
3273 return ret;
3274 }
3275 resp_code = (desc.data[0] >> 8) & 0xff;
3276
3277 return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code,
3278 HCLGE_MAC_VLAN_REMOVE);
3279}
3280
3281static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3282 struct hclge_mac_vlan_tbl_entry *req,
3283 struct hclge_desc *desc,
3284 bool is_mc)
3285{
3286 struct hclge_dev *hdev = vport->back;
3287 u8 resp_code;
3288 int ret;
3289
3290 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3291 if (is_mc) {
3292 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3293 memcpy(desc[0].data,
3294 req,
3295 sizeof(struct hclge_mac_vlan_tbl_entry));
3296 hclge_cmd_setup_basic_desc(&desc[1],
3297 HCLGE_OPC_MAC_VLAN_ADD,
3298 true);
3299 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3300 hclge_cmd_setup_basic_desc(&desc[2],
3301 HCLGE_OPC_MAC_VLAN_ADD,
3302 true);
3303 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3304 } else {
3305 memcpy(desc[0].data,
3306 req,
3307 sizeof(struct hclge_mac_vlan_tbl_entry));
3308 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3309 }
3310 if (ret) {
3311 dev_err(&hdev->pdev->dev,
3312 "lookup mac addr failed for cmd_send, ret =%d.\n",
3313 ret);
3314 return ret;
3315 }
3316 resp_code = (desc[0].data[0] >> 8) & 0xff;
3317
3318 return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code,
3319 HCLGE_MAC_VLAN_LKUP);
3320}
3321
3322static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3323 struct hclge_mac_vlan_tbl_entry *req,
3324 struct hclge_desc *mc_desc)
3325{
3326 struct hclge_dev *hdev = vport->back;
3327 int cfg_status;
3328 u8 resp_code;
3329 int ret;
3330
3331 if (!mc_desc) {
3332 struct hclge_desc desc;
3333
3334 hclge_cmd_setup_basic_desc(&desc,
3335 HCLGE_OPC_MAC_VLAN_ADD,
3336 false);
3337 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3338 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3339 resp_code = (desc.data[0] >> 8) & 0xff;
3340 cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval,
3341 resp_code,
3342 HCLGE_MAC_VLAN_ADD);
3343 } else {
3344 mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3345 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3346 mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3347 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3348 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3349 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3350 memcpy(mc_desc[0].data, req,
3351 sizeof(struct hclge_mac_vlan_tbl_entry));
3352 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3353 resp_code = (mc_desc[0].data[0] >> 8) & 0xff;
3354 cfg_status = hclge_get_mac_vlan_cmd_status(vport,
3355 mc_desc[0].retval,
3356 resp_code,
3357 HCLGE_MAC_VLAN_ADD);
3358 }
3359
3360 if (ret) {
3361 dev_err(&hdev->pdev->dev,
3362 "add mac addr failed for cmd_send, ret =%d.\n",
3363 ret);
3364 return ret;
3365 }
3366
3367 return cfg_status;
3368}
3369
3370static int hclge_add_uc_addr(struct hnae3_handle *handle,
3371 const unsigned char *addr)
3372{
3373 struct hclge_vport *vport = hclge_get_vport(handle);
3374
3375 return hclge_add_uc_addr_common(vport, addr);
3376}
3377
3378int hclge_add_uc_addr_common(struct hclge_vport *vport,
3379 const unsigned char *addr)
3380{
3381 struct hclge_dev *hdev = vport->back;
3382 struct hclge_mac_vlan_tbl_entry req;
3383 enum hclge_cmd_status status;
3384
3385 /* mac addr check */
3386 if (is_zero_ether_addr(addr) ||
3387 is_broadcast_ether_addr(addr) ||
3388 is_multicast_ether_addr(addr)) {
3389 dev_err(&hdev->pdev->dev,
3390 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3391 addr,
3392 is_zero_ether_addr(addr),
3393 is_broadcast_ether_addr(addr),
3394 is_multicast_ether_addr(addr));
3395 return -EINVAL;
3396 }
3397
3398 memset(&req, 0, sizeof(req));
3399 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3400 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3401 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3402 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3403 hnae_set_bit(req.egress_port,
3404 HCLGE_MAC_EPORT_SW_EN_B, 0);
3405 hnae_set_bit(req.egress_port,
3406 HCLGE_MAC_EPORT_TYPE_B, 0);
3407 hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M,
3408 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
3409 hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M,
3410 HCLGE_MAC_EPORT_PFID_S, 0);
3411 req.egress_port = cpu_to_le16(req.egress_port);
3412
3413 hclge_prepare_mac_addr(&req, addr);
3414
3415 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
3416
3417 return status;
3418}
3419
3420static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3421 const unsigned char *addr)
3422{
3423 struct hclge_vport *vport = hclge_get_vport(handle);
3424
3425 return hclge_rm_uc_addr_common(vport, addr);
3426}
3427
3428int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3429 const unsigned char *addr)
3430{
3431 struct hclge_dev *hdev = vport->back;
3432 struct hclge_mac_vlan_tbl_entry req;
3433 enum hclge_cmd_status status;
3434
3435 /* mac addr check */
3436 if (is_zero_ether_addr(addr) ||
3437 is_broadcast_ether_addr(addr) ||
3438 is_multicast_ether_addr(addr)) {
3439 dev_dbg(&hdev->pdev->dev,
3440 "Remove mac err! invalid mac:%pM.\n",
3441 addr);
3442 return -EINVAL;
3443 }
3444
3445 memset(&req, 0, sizeof(req));
3446 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3447 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3448 hclge_prepare_mac_addr(&req, addr);
3449 status = hclge_remove_mac_vlan_tbl(vport, &req);
3450
3451 return status;
3452}
3453
3454static int hclge_add_mc_addr(struct hnae3_handle *handle,
3455 const unsigned char *addr)
3456{
3457 struct hclge_vport *vport = hclge_get_vport(handle);
3458
3459 return hclge_add_mc_addr_common(vport, addr);
3460}
3461
3462int hclge_add_mc_addr_common(struct hclge_vport *vport,
3463 const unsigned char *addr)
3464{
3465 struct hclge_dev *hdev = vport->back;
3466 struct hclge_mac_vlan_tbl_entry req;
3467 struct hclge_desc desc[3];
3468 u16 tbl_idx;
3469 int status;
3470
3471 /* mac addr check */
3472 if (!is_multicast_ether_addr(addr)) {
3473 dev_err(&hdev->pdev->dev,
3474 "Add mc mac err! invalid mac:%pM.\n",
3475 addr);
3476 return -EINVAL;
3477 }
3478 memset(&req, 0, sizeof(req));
3479 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3480 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3481 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3482 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3483 hclge_prepare_mac_addr(&req, addr);
3484 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3485 if (!status) {
 3486 /* This mac addr exists, update the VFID for it */
3487 hclge_update_desc_vfid(desc, vport->vport_id, false);
3488 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3489 } else {
 3490 /* This mac addr does not exist, add a new entry for it */
3491 memset(desc[0].data, 0, sizeof(desc[0].data));
3492 memset(desc[1].data, 0, sizeof(desc[0].data));
3493 memset(desc[2].data, 0, sizeof(desc[0].data));
3494 hclge_update_desc_vfid(desc, vport->vport_id, false);
3495 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3496 }
3497
3498 /* Set MTA table for this MAC address */
3499 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3500 status = hclge_set_mta_table_item(vport, tbl_idx, true);
3501
3502 return status;
3503}
3504
3505static int hclge_rm_mc_addr(struct hnae3_handle *handle,
3506 const unsigned char *addr)
3507{
3508 struct hclge_vport *vport = hclge_get_vport(handle);
3509
3510 return hclge_rm_mc_addr_common(vport, addr);
3511}
3512
3513int hclge_rm_mc_addr_common(struct hclge_vport *vport,
3514 const unsigned char *addr)
3515{
3516 struct hclge_dev *hdev = vport->back;
3517 struct hclge_mac_vlan_tbl_entry req;
3518 enum hclge_cmd_status status;
3519 struct hclge_desc desc[3];
3520 u16 tbl_idx;
3521
3522 /* mac addr check */
3523 if (!is_multicast_ether_addr(addr)) {
3524 dev_dbg(&hdev->pdev->dev,
3525 "Remove mc mac err! invalid mac:%pM.\n",
3526 addr);
3527 return -EINVAL;
3528 }
3529
3530 memset(&req, 0, sizeof(req));
3531 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3532 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3533 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3534 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3535 hclge_prepare_mac_addr(&req, addr);
3536 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3537 if (!status) {
 3538 /* This mac addr exists, remove this handle's VFID for it */
3539 hclge_update_desc_vfid(desc, vport->vport_id, true);
3540
3541 if (hclge_is_all_function_id_zero(desc))
 3542 /* All the vfids are zero, so delete this entry */
3543 status = hclge_remove_mac_vlan_tbl(vport, &req);
3544 else
 3545 /* Not all the vfids are zero, just update the vfid bitmap */
3546 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3547
3548 } else {
 3549 /* This mac addr does not exist, cannot delete it */
3550 dev_err(&hdev->pdev->dev,
 3551 "Rm multicast mac addr failed, ret = %d.\n",
3552 status);
3553 return -EIO;
3554 }
3555
 3556 /* Set MTA table for this MAC address */
3557 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3558 status = hclge_set_mta_table_item(vport, tbl_idx, false);
3559
3560 return status;
3561}
3562
3563static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
3564{
3565 struct hclge_vport *vport = hclge_get_vport(handle);
3566 struct hclge_dev *hdev = vport->back;
3567
3568 ether_addr_copy(p, hdev->hw.mac.mac_addr);
3569}
3570
3571static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
3572{
3573 const unsigned char *new_addr = (const unsigned char *)p;
3574 struct hclge_vport *vport = hclge_get_vport(handle);
3575 struct hclge_dev *hdev = vport->back;
3576
3577 /* mac addr check */
3578 if (is_zero_ether_addr(new_addr) ||
3579 is_broadcast_ether_addr(new_addr) ||
3580 is_multicast_ether_addr(new_addr)) {
3581 dev_err(&hdev->pdev->dev,
3582 "Change uc mac err! invalid mac:%p.\n",
3583 new_addr);
3584 return -EINVAL;
3585 }
3586
3587 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
3588
3589 if (!hclge_add_uc_addr(handle, new_addr)) {
3590 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
3591 return 0;
3592 }
3593
3594 return -EIO;
3595}
3596
3597static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
3598 bool filter_en)
3599{
3600 struct hclge_vlan_filter_ctrl *req;
3601 struct hclge_desc desc;
3602 int ret;
3603
3604 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
3605
3606 req = (struct hclge_vlan_filter_ctrl *)desc.data;
3607 req->vlan_type = vlan_type;
3608 req->vlan_fe = filter_en;
3609
3610 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3611 if (ret) {
3612 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
3613 ret);
3614 return ret;
3615 }
3616
3617 return 0;
3618}
3619
3620int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
3621 bool is_kill, u16 vlan, u8 qos, __be16 proto)
3622{
3623#define HCLGE_MAX_VF_BYTES 16
3624 struct hclge_vlan_filter_vf_cfg *req0;
3625 struct hclge_vlan_filter_vf_cfg *req1;
3626 struct hclge_desc desc[2];
3627 u8 vf_byte_val;
3628 u8 vf_byte_off;
3629 int ret;
3630
3631 hclge_cmd_setup_basic_desc(&desc[0],
3632 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3633 hclge_cmd_setup_basic_desc(&desc[1],
3634 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3635
3636 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3637
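 /* The target VF is selected through a bitmap split across the two
  * descriptors: bitmap bytes 0-15 live in desc[0] and the rest in
  * desc[1]. For example, vfid 20 sets bit 4 of byte 2 in desc[0].
  */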
3638 vf_byte_off = vfid / 8;
3639 vf_byte_val = 1 << (vfid % 8);
3640
3641 req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data;
3642 req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data;
3643
3644 req0->vlan_id = vlan;
3645 req0->vlan_cfg = is_kill;
3646
3647 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
3648 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
3649 else
3650 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
3651
3652 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3653 if (ret) {
3654 dev_err(&hdev->pdev->dev,
3655 "Send vf vlan command fail, ret =%d.\n",
3656 ret);
3657 return ret;
3658 }
3659
3660 if (!is_kill) {
3661 if (!req0->resp_code || req0->resp_code == 1)
3662 return 0;
3663
3664 dev_err(&hdev->pdev->dev,
3665 "Add vf vlan filter fail, ret =%d.\n",
3666 req0->resp_code);
3667 } else {
3668 if (!req0->resp_code)
3669 return 0;
3670
3671 dev_err(&hdev->pdev->dev,
3672 "Kill vf vlan filter fail, ret =%d.\n",
3673 req0->resp_code);
3674 }
3675
3676 return -EIO;
3677}
3678
3679static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
3680 __be16 proto, u16 vlan_id,
3681 bool is_kill)
3682{
3683 struct hclge_vport *vport = hclge_get_vport(handle);
3684 struct hclge_dev *hdev = vport->back;
3685 struct hclge_vlan_filter_pf_cfg *req;
3686 struct hclge_desc desc;
3687 u8 vlan_offset_byte_val;
3688 u8 vlan_offset_byte;
3689 u8 vlan_offset_160;
3690 int ret;
3691
3692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
3693
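 /* The port VLAN table is addressed in blocks of 160 VLAN ids:
  * vlan_offset selects the block, then a byte and a bit within it.
  * For example, vlan_id 1000 maps to block 6, byte 5, bit 0.
  */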
3694 vlan_offset_160 = vlan_id / 160;
3695 vlan_offset_byte = (vlan_id % 160) / 8;
3696 vlan_offset_byte_val = 1 << (vlan_id % 8);
3697
3698 req = (struct hclge_vlan_filter_pf_cfg *)desc.data;
3699 req->vlan_offset = vlan_offset_160;
3700 req->vlan_cfg = is_kill;
3701 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
3702
3703 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3704 if (ret) {
3705 dev_err(&hdev->pdev->dev,
3706 "port vlan command, send fail, ret =%d.\n",
3707 ret);
3708 return ret;
3709 }
3710
3711 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
3712 if (ret) {
3713 dev_err(&hdev->pdev->dev,
3714 "Set pf vlan filter config fail, ret =%d.\n",
3715 ret);
3716 return -EIO;
3717 }
3718
3719 return 0;
3720}
3721
3722static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
3723 u16 vlan, u8 qos, __be16 proto)
3724{
3725 struct hclge_vport *vport = hclge_get_vport(handle);
3726 struct hclge_dev *hdev = vport->back;
3727
3728 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
3729 return -EINVAL;
3730 if (proto != htons(ETH_P_8021Q))
3731 return -EPROTONOSUPPORT;
3732
3733 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
3734}
3735
3736static int hclge_init_vlan_config(struct hclge_dev *hdev)
3737{
3738#define HCLGE_VLAN_TYPE_VF_TABLE 0
3739#define HCLGE_VLAN_TYPE_PORT_TABLE 1
 3740 struct hnae3_handle *handle;
3741 int ret;
3742
3743 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
3744 true);
3745 if (ret)
3746 return ret;
3747
3748 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
3749 true);
3750 if (ret)
3751 return ret;
 3752
3753 handle = &hdev->vport[0].nic;
3754 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
46a3df9f
S
3755}
3756
3757static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
3758{
3759 struct hclge_vport *vport = hclge_get_vport(handle);
3760 struct hclge_config_max_frm_size *req;
3761 struct hclge_dev *hdev = vport->back;
3762 struct hclge_desc desc;
3763 int ret;
3764
3765 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
3766 return -EINVAL;
3767
3768 hdev->mps = new_mtu;
3769 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
3770
3771 req = (struct hclge_config_max_frm_size *)desc.data;
3772 req->max_frm_size = cpu_to_le16(new_mtu);
3773
3774 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3775 if (ret) {
3776 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
3777 return ret;
3778 }
3779
3780 return 0;
3781}
3782
3783static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
3784 bool enable)
3785{
3786 struct hclge_reset_tqp_queue *req;
3787 struct hclge_desc desc;
3788 int ret;
3789
3790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
3791
3792 req = (struct hclge_reset_tqp_queue *)desc.data;
3793 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3794 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
3795
3796 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3797 if (ret) {
3798 dev_err(&hdev->pdev->dev,
3799 "Send tqp reset cmd error, status =%d\n", ret);
3800 return ret;
3801 }
3802
3803 return 0;
3804}
3805
3806static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
3807{
3808 struct hclge_reset_tqp_queue *req;
3809 struct hclge_desc desc;
3810 int ret;
3811
3812 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
3813
3814 req = (struct hclge_reset_tqp_queue *)desc.data;
3815 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3816
3817 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3818 if (ret) {
3819 dev_err(&hdev->pdev->dev,
3820 "Get reset status error, status =%d\n", ret);
3821 return ret;
3822 }
3823
3824 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
3825}
3826
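/* hclge_reset_tqp - reset a single TQP (task queue pair).
 * Disables the queue, asserts the soft reset, polls hclge_get_reset_status()
 * up to HCLGE_TQP_RESET_TRY_TIMES times at 20 ms intervals, and finally
 * deasserts the reset.  Failures are only reported with dev_warn() since
 * the .reset_queue hook returns void.
 */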
3827static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
3828{
3829 struct hclge_vport *vport = hclge_get_vport(handle);
3830 struct hclge_dev *hdev = vport->back;
3831 int reset_try_times = 0;
3832 int reset_status;
3833 int ret;
3834
3835 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
3836 if (ret) {
3837 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
3838 return;
3839 }
3840
3841 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
3842 if (ret) {
3843 dev_warn(&hdev->pdev->dev,
3844 "Send reset tqp cmd fail, ret = %d\n", ret);
3845 return;
3846 }
3847
3848 reset_try_times = 0;
3849 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
3850 /* Wait for tqp hw reset */
3851 msleep(20);
3852 reset_status = hclge_get_reset_status(hdev, queue_id);
3853 if (reset_status)
3854 break;
3855 }
3856
3857 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
3858 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
3859 return;
3860 }
3861
3862 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
3863 if (ret) {
3864 dev_warn(&hdev->pdev->dev,
3865 "Deassert the soft reset fail, ret = %d\n", ret);
3866 return;
3867 }
3868}
3869
3870static u32 hclge_get_fw_version(struct hnae3_handle *handle)
3871{
3872 struct hclge_vport *vport = hclge_get_vport(handle);
3873 struct hclge_dev *hdev = vport->back;
3874
3875 return hdev->fw_version;
3876}
3877
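/* hclge_get_pauseparam - report flow-control state for ethtool -a.
 * In PFC mode both rx and tx pause are reported as disabled; otherwise
 * rx_en/tx_en are derived from tm_info.fc_mode (rx-only, tx-only, full
 * or none).
 */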
3878static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
3879 u32 *rx_en, u32 *tx_en)
3880{
3881 struct hclge_vport *vport = hclge_get_vport(handle);
3882 struct hclge_dev *hdev = vport->back;
3883
3884 *auto_neg = hclge_get_autoneg(handle);
3885
3886 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
3887 *rx_en = 0;
3888 *tx_en = 0;
3889 return;
3890 }
3891
3892 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
3893 *rx_en = 1;
3894 *tx_en = 0;
3895 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
3896 *tx_en = 1;
3897 *rx_en = 0;
3898 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
3899 *rx_en = 1;
3900 *tx_en = 1;
3901 } else {
3902 *rx_en = 0;
3903 *tx_en = 0;
3904 }
3905}
3906
3907static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
3908 u8 *auto_neg, u32 *speed, u8 *duplex)
3909{
3910 struct hclge_vport *vport = hclge_get_vport(handle);
3911 struct hclge_dev *hdev = vport->back;
3912
3913 if (speed)
3914 *speed = hdev->hw.mac.speed;
3915 if (duplex)
3916 *duplex = hdev->hw.mac.duplex;
3917 if (auto_neg)
3918 *auto_neg = hdev->hw.mac.autoneg;
3919}
3920
3921static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
3922{
3923 struct hclge_vport *vport = hclge_get_vport(handle);
3924 struct hclge_dev *hdev = vport->back;
3925
3926 if (media_type)
3927 *media_type = hdev->hw.mac.media_type;
3928}
3929
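/* hclge_get_mdix_mode - read MDI/MDI-X control and status from the PHY.
 * Without an attached phydev both values are ETH_TP_MDI_INVALID.  Otherwise
 * the PHY is switched to the MDIX register page, the control and status
 * registers are read to derive tp_mdix_ctrl and tp_mdix, and the copper
 * page is restored before returning.
 */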
3930static void hclge_get_mdix_mode(struct hnae3_handle *handle,
3931 u8 *tp_mdix_ctrl, u8 *tp_mdix)
3932{
3933 struct hclge_vport *vport = hclge_get_vport(handle);
3934 struct hclge_dev *hdev = vport->back;
3935 struct phy_device *phydev = hdev->hw.mac.phydev;
3936 int mdix_ctrl, mdix, retval, is_resolved;
3937
3938 if (!phydev) {
3939 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3940 *tp_mdix = ETH_TP_MDI_INVALID;
3941 return;
3942 }
3943
3944 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
3945
3946 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
3947 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
3948 HCLGE_PHY_MDIX_CTRL_S);
3949
3950 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
3951 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
3952 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
3953
3954 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
3955
3956 switch (mdix_ctrl) {
3957 case 0x0:
3958 *tp_mdix_ctrl = ETH_TP_MDI;
3959 break;
3960 case 0x1:
3961 *tp_mdix_ctrl = ETH_TP_MDI_X;
3962 break;
3963 case 0x3:
3964 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
3965 break;
3966 default:
3967 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3968 break;
3969 }
3970
3971 if (!is_resolved)
3972 *tp_mdix = ETH_TP_MDI_INVALID;
3973 else if (mdix)
3974 *tp_mdix = ETH_TP_MDI_X;
3975 else
3976 *tp_mdix = ETH_TP_MDI;
3977}
3978
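/* hclge_init_client_instance - bind an hnae3 client to every vport.
 * KNIC/UNIC clients are attached to the nic handle of each vport; a RoCE
 * client (or one registered earlier, once the NIC side is up) additionally
 * gets its base info set up and is initialised on the roce handle when the
 * hardware supports RDMA.
 */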
3979static int hclge_init_client_instance(struct hnae3_client *client,
3980 struct hnae3_ae_dev *ae_dev)
3981{
3982 struct hclge_dev *hdev = ae_dev->priv;
3983 struct hclge_vport *vport;
3984 int i, ret;
3985
3986 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3987 vport = &hdev->vport[i];
3988
3989 switch (client->type) {
3990 case HNAE3_CLIENT_KNIC:
3991
3992 hdev->nic_client = client;
3993 vport->nic.client = client;
3994 ret = client->ops->init_instance(&vport->nic);
3995 if (ret)
3996 goto err;
3997
3998 if (hdev->roce_client &&
e92a0843 3999 hnae3_dev_roce_supported(hdev)) {
4000 struct hnae3_client *rc = hdev->roce_client;
4001
4002 ret = hclge_init_roce_base_info(vport);
4003 if (ret)
4004 goto err;
4005
4006 ret = rc->ops->init_instance(&vport->roce);
4007 if (ret)
4008 goto err;
4009 }
4010
4011 break;
4012 case HNAE3_CLIENT_UNIC:
4013 hdev->nic_client = client;
4014 vport->nic.client = client;
4015
4016 ret = client->ops->init_instance(&vport->nic);
4017 if (ret)
4018 goto err;
4019
4020 break;
4021 case HNAE3_CLIENT_ROCE:
e92a0843 4022 if (hnae3_dev_roce_supported(hdev)) {
4023 hdev->roce_client = client;
4024 vport->roce.client = client;
4025 }
4026
4027 if (hdev->roce_client) {
4028 ret = hclge_init_roce_base_info(vport);
4029 if (ret)
4030 goto err;
4031
4032 ret = client->ops->init_instance(&vport->roce);
4033 if (ret)
4034 goto err;
4035 }
4036 }
4037 }
4038
4039 return 0;
4040err:
4041 return ret;
4042}
4043
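/* hclge_uninit_client_instance - tear a client back down per vport.
 * Any RoCE instance is uninitialised first; a pure RoCE client returns at
 * that point, otherwise the nic instance is uninitialised as well.
 */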
4044static void hclge_uninit_client_instance(struct hnae3_client *client,
4045 struct hnae3_ae_dev *ae_dev)
4046{
4047 struct hclge_dev *hdev = ae_dev->priv;
4048 struct hclge_vport *vport;
4049 int i;
4050
4051 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4052 vport = &hdev->vport[i];
4053 if (hdev->roce_client)
4054 hdev->roce_client->ops->uninit_instance(&vport->roce,
4055 0);
4056 if (client->type == HNAE3_CLIENT_ROCE)
4057 return;
4058 if (client->ops->uninit_instance)
4059 client->ops->uninit_instance(&vport->nic, 0);
4060 }
4061}
4062
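/* hclge_pci_init - bring up PCI resources for the PF.
 * Enables the device, negotiates a 64-bit DMA mask (falling back to 32-bit
 * with a warning), claims the regions, enables bus mastering and maps BAR 2
 * as the command/configuration register space.
 */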
4063static int hclge_pci_init(struct hclge_dev *hdev)
4064{
4065 struct pci_dev *pdev = hdev->pdev;
4066 struct hclge_hw *hw;
4067 int ret;
4068
4069 ret = pci_enable_device(pdev);
4070 if (ret) {
4071 dev_err(&pdev->dev, "failed to enable PCI device\n");
4072 goto err_no_drvdata;
4073 }
4074
4075 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4076 if (ret) {
4077 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4078 if (ret) {
4079 dev_err(&pdev->dev,
4080 "can't set consistent PCI DMA");
4081 goto err_disable_device;
4082 }
4083 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4084 }
4085
4086 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4087 if (ret) {
4088 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4089 goto err_disable_device;
4090 }
4091
4092 pci_set_master(pdev);
4093 hw = &hdev->hw;
4094 hw->back = hdev;
4095 hw->io_base = pcim_iomap(pdev, 2, 0);
4096 if (!hw->io_base) {
4097 dev_err(&pdev->dev, "Can't map configuration register space\n");
4098 ret = -ENOMEM;
4099 goto err_clr_master;
4100 }
4101
4102 return 0;
4103err_clr_master:
4104 pci_clear_master(pdev);
4105 pci_release_regions(pdev);
4106err_disable_device:
4107 pci_disable_device(pdev);
4108err_no_drvdata:
4109 pci_set_drvdata(pdev, NULL);
4110
4111 return ret;
4112}
4113
4114static void hclge_pci_uninit(struct hclge_dev *hdev)
4115{
4116 struct pci_dev *pdev = hdev->pdev;
4117
4118 if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
4119 pci_disable_msix(pdev);
4120 devm_kfree(&pdev->dev, hdev->msix_entries);
4121 hdev->msix_entries = NULL;
4122 } else {
4123 pci_disable_msi(pdev);
4124 }
4125
4126 pci_clear_master(pdev);
4127 pci_release_mem_regions(pdev);
4128 pci_disable_device(pdev);
4129}
4130
4131static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4132{
4133 struct pci_dev *pdev = ae_dev->pdev;
4134 struct hclge_dev *hdev;
4135 int ret;
4136
4137 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4138 if (!hdev) {
4139 ret = -ENOMEM;
4140 goto err_hclge_dev;
4141 }
4142
4143 hdev->flag |= HCLGE_FLAG_USE_MSIX;
4144 hdev->pdev = pdev;
4145 hdev->ae_dev = ae_dev;
4146 ae_dev->priv = hdev;
4147
4148 ret = hclge_pci_init(hdev);
4149 if (ret) {
4150 dev_err(&pdev->dev, "PCI init failed\n");
4151 goto err_pci_init;
4152 }
4153
4154 /* Command queue initialize */
4155 ret = hclge_cmd_init(hdev);
4156 if (ret)
4157 goto err_cmd_init;
4158
4159 ret = hclge_get_cap(hdev);
4160 if (ret) {
4161 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4162 ret);
4163 return ret;
4164 }
4165
4166 ret = hclge_configure(hdev);
4167 if (ret) {
4168 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4169 return ret;
4170 }
4171
4172 if (hdev->flag & HCLGE_FLAG_USE_MSIX)
4173 ret = hclge_init_msix(hdev);
4174 else
4175 ret = hclge_init_msi(hdev);
4176 if (ret) {
4177 dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
4178 return ret;
4179 }
4180
4181 ret = hclge_alloc_tqps(hdev);
4182 if (ret) {
4183 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4184 return ret;
4185 }
4186
4187 ret = hclge_alloc_vport(hdev);
4188 if (ret) {
4189 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4190 return ret;
4191 }
4192
4193 ret = hclge_mac_init(hdev);
4194 if (ret) {
4195 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4196 return ret;
4197 }
4198 ret = hclge_buffer_alloc(hdev);
4199 if (ret) {
4200 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4201 return ret;
4202 }
4203
4204 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4205 if (ret) {
4206 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4207 return ret;
4208 }
4209
4210 ret = hclge_init_vlan_config(hdev);
4211 if (ret) {
4212 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4213 return ret;
4214 }
4215
4216 ret = hclge_tm_schd_init(hdev);
4217 if (ret) {
4218 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4219 return ret;
4220 }
4221
4222 ret = hclge_rss_init_hw(hdev);
4223 if (ret) {
4224 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4225 return ret;
4226 }
4227
4228 setup_timer(&hdev->service_timer, hclge_service_timer,
4229 (unsigned long)hdev);
4230 INIT_WORK(&hdev->service_task, hclge_service_task);
4231
4232 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4233 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4234
4235 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4236 return 0;
4237
4238err_cmd_init:
4239 pci_release_regions(pdev);
4240err_pci_init:
4241 pci_set_drvdata(pdev, NULL);
4242err_hclge_dev:
4243 return ret;
4244}
4245
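/* hclge_uninit_ae_dev - remove path, mirroring hclge_init_ae_dev().
 * Marks the device down, disables SR-IOV when built in, stops the service
 * timer and work, unregisters the MDIO bus if a PHY is attached, then
 * destroys the command queue and releases the PCI resources.
 */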
4246static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4247{
4248 struct hclge_dev *hdev = ae_dev->priv;
4249 struct hclge_mac *mac = &hdev->hw.mac;
4250
4251 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4252
4253 if (IS_ENABLED(CONFIG_PCI_IOV))
4254 hclge_disable_sriov(hdev);
4255
4256 if (hdev->service_timer.data)
4257 del_timer_sync(&hdev->service_timer);
4258 if (hdev->service_task.func)
4259 cancel_work_sync(&hdev->service_task);
4260
4261 if (mac->phydev)
4262 mdiobus_unregister(mac->mdio_bus);
4263
4264 hclge_destroy_cmd_queue(&hdev->hw);
4265 hclge_pci_uninit(hdev);
4266 ae_dev->priv = NULL;
4267}
4268
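/* hclge_ops - hnae3_ae_ops implementation provided by this PF driver.
 * This table is what the hnae3 framework and its clients call into for
 * device lifetime, MAC/VLAN/RSS configuration, ethtool queries and queue
 * resets.
 */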
4269static const struct hnae3_ae_ops hclge_ops = {
4270 .init_ae_dev = hclge_init_ae_dev,
4271 .uninit_ae_dev = hclge_uninit_ae_dev,
4272 .init_client_instance = hclge_init_client_instance,
4273 .uninit_client_instance = hclge_uninit_client_instance,
4274 .map_ring_to_vector = hclge_map_handle_ring_to_vector,
4275 .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4276 .get_vector = hclge_get_vector,
4277 .set_promisc_mode = hclge_set_promisc_mode,
4278 .start = hclge_ae_start,
4279 .stop = hclge_ae_stop,
4280 .get_status = hclge_get_status,
4281 .get_ksettings_an_result = hclge_get_ksettings_an_result,
4282 .update_speed_duplex_h = hclge_update_speed_duplex_h,
4283 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4284 .get_media_type = hclge_get_media_type,
4285 .get_rss_key_size = hclge_get_rss_key_size,
4286 .get_rss_indir_size = hclge_get_rss_indir_size,
4287 .get_rss = hclge_get_rss,
4288 .set_rss = hclge_set_rss,
4289 .get_tc_size = hclge_get_tc_size,
4290 .get_mac_addr = hclge_get_mac_addr,
4291 .set_mac_addr = hclge_set_mac_addr,
4292 .add_uc_addr = hclge_add_uc_addr,
4293 .rm_uc_addr = hclge_rm_uc_addr,
4294 .add_mc_addr = hclge_add_mc_addr,
4295 .rm_mc_addr = hclge_rm_mc_addr,
4296 .set_autoneg = hclge_set_autoneg,
4297 .get_autoneg = hclge_get_autoneg,
4298 .get_pauseparam = hclge_get_pauseparam,
4299 .set_mtu = hclge_set_mtu,
4300 .reset_queue = hclge_reset_tqp,
4301 .get_stats = hclge_get_stats,
4302 .update_stats = hclge_update_stats,
4303 .get_strings = hclge_get_strings,
4304 .get_sset_count = hclge_get_sset_count,
4305 .get_fw_version = hclge_get_fw_version,
4306 .get_mdix_mode = hclge_get_mdix_mode,
4307 .set_vlan_filter = hclge_set_port_vlan_filter,
4308 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4309};
4310
4311static struct hnae3_ae_algo ae_algo = {
4312 .ops = &hclge_ops,
4313 .name = HCLGE_NAME,
4314 .pdev_id_table = ae_algo_pci_tbl,
4315};
4316
4317static int hclge_init(void)
4318{
4319 pr_info("%s is initializing\n", HCLGE_NAME);
4320
4321 return hnae3_register_ae_algo(&ae_algo);
4322}
4323
4324static void hclge_exit(void)
4325{
4326 hnae3_unregister_ae_algo(&ae_algo);
4327}
4328module_init(hclge_init);
4329module_exit(hclge_exit);
4330
4331MODULE_LICENSE("GPL");
4332MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4333MODULE_DESCRIPTION("HCLGE Driver");
4334MODULE_VERSION(HCLGE_MOD_VERSION);