/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
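
/* Illustrative usage (editor's sketch, not driver code): each table entry
 * below pairs an ethtool string with the byte offset of its counter, so a
 * single generic reader can walk any of the stats structs, e.g.:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */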

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
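
/* Editor's note on the loop above: the firmware returns the 64-bit
 * counters in a chain of HCLGE_64_BIT_CMD_NUM descriptors.  Descriptor 0
 * keeps its command header, so only HCLGE_64_BIT_RTN_DATANUM - 1 of its
 * quadwords carry counters; every later descriptor is reinterpreted
 * wholesale as HCLGE_64_BIT_RTN_DATANUM quadwords.  With 5 descriptors
 * that accumulates 3 + 4 * 4 = 19 u64 slots, walked in the field order
 * of struct hclge_64_bit_stats.
 */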

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt     = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}
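
/* The "* 2" above reflects the two per-queue ethtool stats: the TX and RX
 * packet counters emitted by hclge_tqps_get_stats()/_get_strings().
 */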

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}
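
/* Editor's note on the mapping above: rx_dropped aggregates the SSU/PPP
 * drop counters, rx_errors aggregates the over/undersize, IGU error and
 * FCS counters, and multicast sums the TX and RX multicast MAC counters.
 * The hardware counters are cumulative, so each field is overwritten
 * (not incremented) on every update.
 */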

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported by all MAC modes (GE/XGE/LGE/CGE)
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
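
/* The speed codes decoded above are the firmware encoding, the exact
 * inverse of what hclge_cfg_mac_speed_dup() writes below:
 * 0=1G, 1=10G, 2=25G, 3=40G, 4=50G, 5=100G, 6=10M, 7=100M.
 */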

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);
	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
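
/* Worked example for the MAC address reassembly above (editor's note):
 * param[2] carries address bits 0..31 and param[3] carries bits 32..47.
 * "(high << 31) << 1" is simply "high << 32" written in two steps, and
 * the byte loop emits the least significant byte first, so a value of
 * 0x0000112233445566 becomes mac_addr[] = {66, 55, 44, 33, 22, 11}.
 */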

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = 1;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
	else
		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
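
/* Sizing example for hclge_knic_setup() (illustrative numbers): with
 * num_tqps = 16, tm_info.num_tc = 4 and rss_size_max = 8, rss_size is
 * min(8, 16 / 4) = 4 and kinfo->num_tqps = 4 * 4 = 16, i.e. every
 * enabled TC owns a contiguous slice of rss_size queues starting at
 * tqp_offset = tc * rss_size.
 */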

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
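
/* Threshold example for hclge_is_rx_buf_ok() (illustrative numbers,
 * assuming HCLGE_DEFAULT_DV is 0x8000): with mps = 256, DCB support,
 * tc_num = 4 and pfc_enable_num = 2, shared_buf_min = 2 * 256 + 0x8000
 * = 33280 and shared_buf_tc = 2 * 256 + 2 * 256 / 2 + 256 = 1024, so
 * shared_std = 33280; rx_all must exceed the private allocation by at
 * least that much before the layout is accepted.
 */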

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				HCLGE_ROCE_VECTOR_OFFSET;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}
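
/* The speed encodings written above (1G=0, 10G=1, 25G=2, 40G=3, 50G=4,
 * 100G=5, 10M=6, 100M=7) mirror the decoding in hclge_parse_speed().
 */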

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					int *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}

static int hclge_query_autoneg_result(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"autoneg result query cmd failed %d.\n", ret);
		return ret;
	}

	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);

	return 0;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

2181 static int hclge_set_autoneg(struct hnae3_handle
*handle
, bool enable
)
2183 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2184 struct hclge_dev
*hdev
= vport
->back
;
2186 return hclge_set_autoneg_en(hdev
, enable
);
2189 static int hclge_get_autoneg(struct hnae3_handle
*handle
)
2191 struct hclge_vport
*vport
= hclge_get_vport(handle
);
2192 struct hclge_dev
*hdev
= vport
->back
;
2194 hclge_query_autoneg_result(hdev
);
2196 return hdev
->hw
.mac
.autoneg
;
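/* hclge_mac_init - bring up the MAC: force full duplex at the firmware
 * configured speed, then set the MTA (multicast table) work mode and the
 * per-function multicast accept policy.
 */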
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed,
				      HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc = true;
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when the phy doesn't exist.
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
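/* hclge_check_event_cause - decode the vector0 interrupt source. Reads the
 * misc reset status register, latches any global/core/IMP reset request
 * into reset_pending and tells the caller which bit to clear.
 */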
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* mailbox event sharing vector 0 interrupt would be placed here */

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	if (event_type == HCLGE_VECTOR0_EVENT_RST)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);

	/* mailbox event sharing vector 0 interrupt would be placed here */
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.
	 * For now, we are not handling mailbox events.
	 */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_dbg(&hdev->pdev->dev,
			"received unknown or unhandled event of vector0\n");
		break;
	}

	/* we should clear the source of interrupt */
	hclge_clear_event_cause(hdev, event_cause, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}
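/* hclge_reset_wait - poll the reset status register matching the pending
 * reset type until hardware reports completion, giving up after
 * HCLGE_RESET_WAIT_CNT polls of HCLGE_RESET_WATI_MS each.
 */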
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
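/* hclge_get_reset_level - return the highest priority reset level latched
 * in the given bitmap (global > core > IMP > func) and clear all pending
 * bits, since the stronger reset covers the weaker ones.
 */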
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	/* perform reset of the stack & ae device for a client */

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static void hclge_reset_event(struct hnae3_handle *handle,
			      enum hnae3_reset_type reset)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	dev_info(&hdev->pdev->dev,
		 "Receive reset event, reset_type is %d\n", reset);

	switch (reset) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
		/* request reset & schedule reset task */
		set_bit(reset, &hdev->reset_request);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d\n",
			 reset);
		break;
	}
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully resetted then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_stats_for_all(hdev);
	hclge_service_complete(hdev);
}

static void hclge_disable_sriov(struct hclge_dev *hdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(hdev->pdev)) {
		dev_warn(&hdev->pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(hdev->pdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
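/* hclge_get_vector - hand out up to vector_num unused MSI-X vectors to the
 * requesting vport, filling in each vector's irq number and the I/O address
 * of its interrupt control register.
 */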
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int rss_hash_algo;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get rss algo fail, status =%d\n", ret);
		return ret;
	}

	req = (struct hclge_rss_config_cmd *)desc.data;
	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);

	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
		return ETH_RSS_HASH_TOP;

	return -EINVAL;
}

static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = hclge_get_rss_algo(hdev);

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		/* Update the shadow RSS key with user specified qids */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	ret = hclge_set_rss_indir_table(hdev, indir);
	return ret;
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);

	return ret;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = req->ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = req->ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = req->ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = req->ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = req->ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = req->ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
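/* hclge_rss_init_hw - program the RSS defaults: a random Toeplitz hash key,
 * a per-vport indirection table spread across the allocated queues, the
 * input tuple selection, and the per-TC mode where tc_size is the log2 of
 * rss_size rounded up to a power of two.
 */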
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u8 rss_key[HCLGE_RSS_KEY_SIZE];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u32 *rss_indir = NULL;
	u16 rss_size = 0, roundup_size;
	const u8 *key = rss_key;
	int i, ret, j;

	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	/* Get default RSS key */
	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);

	/* Initialize RSS indirect table for each vport */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;

			/* vport 0 is for PF */
			if (j != 0)
				continue;

			rss_size = vport[j].alloc_rss_size;
			rss_indir[i] = vport[j].rss_indirection_tbl[i];
		}
	}
	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		goto err;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		goto err;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		goto err;

	/* Each TC have the same queue size, and tc_size set to hardware is
	 * the log2 of roundup power of two of rss_size, the actual queue
	 * size is limited by indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

err:
	kfree(rss_indir);

	return ret;
}
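/* hclge_map_vport_ring_to_vector - bind every ring in the chain to the
 * given interrupt vector, batching HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries per command descriptor.
 */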
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
				   struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_ADD_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_map_handle_ring_to_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
}

static int hclge_unmap_ring_from_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int i, vector_id;
	int ret;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));

		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Unmap TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_DEL_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Unmap TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}
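/* hclge_cfg_mac_mode - enable or disable the MAC datapath. TX/RX, padding,
 * FCS insert/strip and oversize/undersize handling are switched together;
 * 1588 timestamping and the loopback bits stay cleared.
 */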
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
		/* 1 Read out the MAC mode config at first */
		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_CONFIG_MAC_MODE,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac loopback get fail, ret =%d.\n",
				ret);
			return ret;
		}

		/* 2 Then setup the loopback flag */
		loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
		if (en)
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
		else
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);

		req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

		/* 3 Config mac work mode with loopback flag
		 * and its original configure parameters
		 */
		hclge_cmd_reuse_desc(&desc, false);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"mac loopback set fail, ret =%d.\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id, ret;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* todo clear interrupt */
		/* ring enable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, true);
	}
	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* Ring disable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, false);
	}
	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
}
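/* hclge_get_mac_vlan_cmd_status - translate the firmware response code of a
 * MAC/VLAN table add/remove/lookup command into an errno, logging the
 * specific cause (unicast/multicast overflow, entry miss, etc.).
 */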
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EIO;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
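/* hclge_update_desc_vfid - set or clear one function id bit in the VF
 * bitmap of a MAC/VLAN table entry; function ids 0-191 live in desc[1],
 * 192-255 in desc[2], 32 ids per 32-bit data word.
 */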
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
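/* hclge_prepare_mac_addr - pack the six bytes of a MAC address into the
 * little-endian hi32/lo16 address fields of a MAC/VLAN table entry.
 */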
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	u16 egress_port = 0;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	status = hclge_add_mac_vlan_tbl(vport, &req, NULL);

	return status;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_remove_mac_vlan_tbl(vport, &req);

	return status;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr do not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr do not exist, can't delete it */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%p.\n",
			 new_addr);
		return -EINVAL;
	}

	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);

	if (!hclge_add_uc_addr(handle, new_addr)) {
		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
		return 0;
	}

	return -EIO;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
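/* hclge_set_port_vlan_filter - program one VLAN id into the PF port VLAN
 * bitmap (addressed in pages of 160 VLANs) and mirror the entry into the
 * VF VLAN table for function 0.
 */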
static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
				      __be16 proto, u16 vlan_id,
				      bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n",
			ret);
		return ret;
	}

	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	return 0;
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE   0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
	struct hnae3_handle *handle;
	int ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
					 true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
					 true);
	if (ret)
		return ret;

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
		return -EINVAL;

	hdev->mps = new_mtu;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mtu);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
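/* hclge_reset_tqp - soft reset one TQP: disable the queue, assert the reset
 * request, poll up to HCLGE_TQP_RESET_TRY_TIMES for hardware to report
 * ready, then deassert the reset.
 */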
static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

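/* Report the MDI-X control and status to ethtool. The PHY is switched
 * to its MDI-X register page for the reads and restored to the copper
 * page afterwards.
 */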
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

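/* Attach a KNIC/UNIC/RoCE client to each vport and run its
 * init_instance hook. A RoCE instance is only initialized once both a
 * NIC client is bound and the device supports RoCE.
 */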
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				return ret;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					return ret;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					return ret;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				return ret;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					return ret;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

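/* Bring up the PCI side of the device: enable it, set a 64-bit DMA mask
 * (falling back to 32-bit), request regions and map BAR2 as the
 * register space. Errors unwind through the goto labels.
 */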
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->back = hdev;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

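/* Main PF initialization: PCI, firmware command queue, interrupt
 * vectors, TQPs, vports, MAC/MDIO, buffers, VLAN, TM scheduling and
 * RSS, then the service timer, work items and the MISC vector.
 */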
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_request = 0;
	hdev->reset_pending = 0;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		return ret;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

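/* Reinitialization path run after a device reset: clears the cached
 * statistics and replays the hardware-facing part of the init sequence
 * against the already-allocated software state.
 */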
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

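/* The ops table the hnae3 framework uses to reach the PF routines
 * defined above.
 */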
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.reset_event = hclge_reset_event,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);