/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) \
	(offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) \
	(offsetof(struct hclge_32_bit_stats, f))
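/*
 * The FIELD_OFF macros pair a counter's ethtool name with its byte offset
 * inside the matching hw_stats sub-structure, so dumping statistics is a
 * plain table walk over offsets. Illustrative use (not a call site in this
 * file):
 *
 *	u64 v = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *				 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */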
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
/* entries indexed by the HNAE3_MAC_INTER_LOOP_* constants used below */
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};
static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
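/*
 * Descriptor layout note: each command descriptor is 32 bytes, the first
 * 8 of which are the command header (opcode/flags/retval). Only the first
 * descriptor of the burst keeps its header, so it contributes
 * HCLGE_64_BIT_RTN_DATANUM - 1 (i.e. three) 64-bit words of statistics;
 * the hardware packs the remaining descriptors entirely with data, four
 * words each, which is why they are cast as whole descriptors above.
 */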
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt     = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
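/*
 * The pkt_curr_buf*_cnt fields are occupancy gauges rather than
 * accumulating counters, so hclge_32_bit_update_stats() below zeroes them
 * before re-adding the freshly read hardware values; everything else in
 * the 32-bit block keeps accumulating across reads.
 */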
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}
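/* Two counters are exposed per TQP: one TX and one RX packet count. */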
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
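/*
 * The firmware may still be completing a PF reset when the query above
 * runs, so the command is retried (up to six attempts, sleeping 1-2 ms
 * between them) until pf_state reports a non-zero value.
 */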
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
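/*
 * The numeric codes above are the firmware's speed encoding and are the
 * exact inverse of the values written by hclge_cfg_mac_speed_dup() below
 * (e.g. code 6 <-> 10M, 0 <-> 1G, 5 <-> 100G), so the two mappings must
 * be kept in sync.
 */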
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
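/*
 * MAC address layout in the config parameters: param[2] carries the low
 * 32 bits and a field of param[3] the upper 16 bits of the 48-bit
 * address. The two chained shifts (by 31, then by 1) are equivalent to a
 * single shift by 32; the bytes are then emitted least-significant first
 * into cfg->mac_addr.
 */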
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be united by 4 bytes when send to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Discontiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	/* first-fit scan over the global queue pool */
	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
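/*
 * Worked example of the queue split above: with num_tqps = 16,
 * rss_size_max = 8 and tm_info.num_tc = 4, rss_size becomes
 * min(8, 16 / 4) = 4 and the handle ends up with 4 * 4 = 16 TQPs,
 * four per enabled TC, laid out contiguously per TC via tqp_offset.
 */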
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
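/*
 * Queue budgeting note: every vport gets num_tqps / num_vport queues and
 * the main PF vport (index 0) additionally absorbs the remainder, so no
 * hardware queue is left unassigned. E.g. 10 TQPs over 4 vports gives
 * the main vport 2 + 2 = 4 queues and each other vport 2.
 */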
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
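/*
 * Sizing rationale, as implemented above: the shared pool must cover at
 * least two maximum-size frames plus the delay-variation allowance, and
 * also one full frame per PFC-enabled TC plus half a frame per remaining
 * TC (plus one spare frame). The larger bound becomes the required
 * minimum; if the packet buffer cannot provide it on top of the private
 * buffers, the current private-buffer layout is rejected.
 */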
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
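/*
 * Recovery strategy recap for hclge_rx_buffer_calc(): each step gives up
 * buffer guarantees only if the previous layout still does not fit,
 * moving from smaller watermarks (step 2) to dropping private buffers of
 * non-PFC TCs (step 3) and finally of PFC TCs (step 4). -ENOMEM is
 * returned only when even the most frugal layout fails the
 * hclge_is_rx_buf_ok() check.
 */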
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				HCLGE_ROCE_VECTOR_OFFSET;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return ret;
	}

	return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}
static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
					   bool mask_vlan,
					   u8 *mac_mask)
{
	struct hclge_mac_vlan_mask_entry_cmd *req;
	struct hclge_desc desc;
	int status;

	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);

	hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
		     mask_vlan ? 1 : 0);
	ether_addr_copy(req->mac_mask, mac_mask);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
			status);

	return status;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc = true;
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set mta filter mode fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set default mac_vlan_mask fail ret=%d\n", ret);

	return ret;
}
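/* The PF driver runs three independent work items: a mailbox task for
 * VF-to-PF messages, a reset task, and the periodic service task. The
 * HCLGE_STATE_*_SCHED bits below guard against double-scheduling.
 */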
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when no phy exists
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
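/* Decode which event source raised the shared misc (vector 0)
 * interrupt: reset sources are read from HCLGE_MISC_RESET_STS_REG and
 * mailbox (CMDQ RX) events from HCLGE_VECTOR0_CMDQ_SRC_REG. *clearval
 * tells the caller which bits to write back to acknowledge the event.
 */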
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_dbg(&hdev->pdev->dev,
			"received unknown or unhandled event of vector0\n");
		break;
	}

	/* we should clear the source of interrupt */
	hclge_clear_event_cause(hdev, event_cause, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
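/* Forward a reset notification (down/uninit/init/up) to the nic client
 * of every vport, aborting on the first error.
 */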
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
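/* Pick the highest-priority reset pending in *addr (global > core >
 * IMP > function) and clear all recorded requests, since a
 * higher-level reset covers the lower ones.
 */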
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	/* perform reset of the stack & ae device for a client */

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
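/* Entry point for reset requests from the upper layer: the requested
 * level is recorded in reset_request and the reset service task is
 * scheduled to do the actual work.
 */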
static void hclge_reset_event(struct hnae3_handle *handle,
			      enum hnae3_reset_type reset)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	dev_info(&hdev->pdev->dev,
		 "Receive reset event, reset_type is %d\n", reset);

	switch (reset) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
		/* request reset & schedule reset task */
		set_bit(reset, &hdev->reset_request);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d\n",
			 reset);
		break;
	}
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully resetted then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_service_complete(hdev);
}
static void hclge_disable_sriov(struct hclge_dev *hdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(hdev->pdev)) {
		dev_warn(&hdev->pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(hdev->pdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
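/* Allocate up to vector_num MSI(-X) vectors for a vport. Vector 0 is
 * reserved for the misc interrupt, so the search starts at index 1;
 * the return value is the number of vectors actually allocated.
 */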
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int rss_hash_algo;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get rss algo config error, status =%d\n", ret);
		return ret;
	}

	req = (struct hclge_rss_config_cmd *)desc.data;
	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);

	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
		return ETH_RSS_HASH_TOP;

	return -EINVAL;
}

static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = hclge_get_rss_algo(hdev);

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		/* Update the shadow RSS key with user specified qids */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	ret = hclge_set_rss_indir_table(hdev, indir);
	return ret;
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);

	return ret;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = req->ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = req->ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = req->ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = req->ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = req->ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = req->ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u8 rss_key[HCLGE_RSS_KEY_SIZE];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u32 *rss_indir = NULL;
	u16 rss_size = 0, roundup_size;
	const u8 *key;
	int i, ret, j;

	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	/* Get default RSS key */
	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);

	/* Initialize RSS indirect table for each vport */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;

			/* vport 0 is for PF */
			if (j != 0)
				continue;

			rss_size = vport[j].alloc_rss_size;
			rss_indir[i] = vport[j].rss_indirection_tbl[i];
		}
	}
	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		goto err;

	key = rss_key;
	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		goto err;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		goto err;

	/* Each TC have the same queue size, and tc_size set to hardware is
	 * the log2 of roundup power of two of rss_size, the actual queue
	 * size is limited by indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

err:
	kfree(rss_indir);

	return ret;
}
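/* Bind (or unbind, for en == false) a chain of TQP rings to an
 * interrupt vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries
 * fit in one command descriptor, so longer chains are flushed to
 * firmware in batches.
 */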
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
			       HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
			       HCLGE_TQP_ID_S, node->tqp_index);
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id,
			ret);
		return ret;
	}

	/* Free this MSIX or MSI vector */
	hclge_free_vector(hdev, vector_id);

	return 0;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);
		return ret;
	}
	return 0;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}
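/* Enable or disable the MAC: tx/rx, padding, FCS insert/strip and
 * oversize/undersize handling are toggled together, while the 1588
 * and loopback bits are always cleared here.
 */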
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 loop_en;
	int ret = 0;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
		/* 1 Read out the MAC mode config at first */
		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_CONFIG_MAC_MODE,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac loopback get fail, ret =%d.\n",
				ret);
			return ret;
		}

		/* 2 Then setup the loopback flag */
		loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
		if (en)
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
		else
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);

		req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

		/* 3 Config mac work mode with loopback flag
		 * and its original configure parameters
		 */
		hclge_cmd_reuse_desc(&desc, false);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"mac loopback set fail, ret =%d.\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id, ret;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* todo clear interrupt */
		/* ring enable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, true);
	}
	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* Ring disable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, false);
	}
	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EIO;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
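/* Set or clear a function id in the VF bitmap of a MAC-VLAN table
 * entry. The 256-bit bitmap spans two descriptors: vfid 0-191 lives in
 * desc[1] and vfid 192-255 in desc[2].
 */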
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	u16 egress_port = 0;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	status = hclge_add_mac_vlan_tbl(vport, &req, NULL);

	return status;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_remove_mac_vlan_tbl(vport, &req);

	return status;
}
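/* Multicast entries are shared across functions: adding an address
 * looks up the existing entry (if any) and sets this vport's bit in
 * the entry's VF bitmap before writing it back, and the corresponding
 * MTA hash-table bit is set as well.
 */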
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr do not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* This mac addr do not exist, can't delete it */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);

	if (!hclge_add_uc_addr(handle, new_addr)) {
		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
		return 0;
	}

	return -EIO;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
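/* Add or kill a VLAN filter entry on behalf of a function. The VF
 * bitmap is carried in two chained descriptors of HCLGE_MAX_VF_BYTES
 * each, and the firmware response code in req0 is checked to decide
 * success.
 */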
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
				      __be16 proto, u16 vlan_id,
				      bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n",
			ret);
		return ret;
	}

	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	return 0;
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
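/* Push this vport's tx VLAN offload settings (default tags, tag
 * accept/insert bits) to hardware; the vport is addressed through a
 * vf_offset/vf_bitmap pair in the command.
 */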
4381 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport
*vport
)
4383 struct hclge_tx_vtag_cfg
*vcfg
= &vport
->txvlan_cfg
;
4384 struct hclge_vport_vtag_tx_cfg_cmd
*req
;
4385 struct hclge_dev
*hdev
= vport
->back
;
4386 struct hclge_desc desc
;
4389 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_VLAN_PORT_TX_CFG
, false);
4391 req
= (struct hclge_vport_vtag_tx_cfg_cmd
*)desc
.data
;
4392 req
->def_vlan_tag1
= cpu_to_le16(vcfg
->default_tag1
);
4393 req
->def_vlan_tag2
= cpu_to_le16(vcfg
->default_tag2
);
4394 hnae_set_bit(req
->vport_vlan_cfg
, HCLGE_ACCEPT_TAG_B
,
4395 vcfg
->accept_tag
? 1 : 0);
4396 hnae_set_bit(req
->vport_vlan_cfg
, HCLGE_ACCEPT_UNTAG_B
,
4397 vcfg
->accept_untag
? 1 : 0);
4398 hnae_set_bit(req
->vport_vlan_cfg
, HCLGE_PORT_INS_TAG1_EN_B
,
4399 vcfg
->insert_tag1_en
? 1 : 0);
4400 hnae_set_bit(req
->vport_vlan_cfg
, HCLGE_PORT_INS_TAG2_EN_B
,
4401 vcfg
->insert_tag2_en
? 1 : 0);
4402 hnae_set_bit(req
->vport_vlan_cfg
, HCLGE_CFG_NIC_ROCE_SEL_B
, 0);
4404 req
->vf_offset
= vport
->vport_id
/ HCLGE_VF_NUM_PER_CMD
;
4405 req
->vf_bitmap
[req
->vf_offset
] =
4406 1 << (vport
->vport_id
% HCLGE_VF_NUM_PER_BYTE
);
4408 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
4410 dev_err(&hdev
->pdev
->dev
,
4411 "Send port txvlan cfg command fail, ret =%d\n",
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle;
	struct hclge_vport *vport;
	int ret;
	int i;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
	if (ret)
		return ret;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag = true;
		vport->txvlan_cfg.accept_untag = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
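/* Summary of the defaults established above, as read from the code: both
 * the VF and port VLAN filters are switched on, every TPID the hardware
 * matches or inserts is left at the standard 0x8100, transmit paths
 * accept tagged and untagged frames without inserting tags, receive paths
 * enable stripping for tag 2 only, and VLAN 0 is added to the port filter
 * (is_kill = false) so untagged and priority-tagged traffic keeps flowing.
 */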
static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
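/* This is the hook behind the RX VLAN-offload toggle; in the hns3 layering
 * it is plausibly reached when the stack flips NETIF_F_HW_VLAN_CTAG_RX
 * (e.g. "ethtool -K <dev> rxvlan off"), so only the tag-2 strip bit tracks
 * the feature while the other fields stay at their hclge_init_vlan_config()
 * defaults.
 */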
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int max_frm_size;
	int ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(max_frm_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	hdev->mps = max_frm_size;

	return 0;
}
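/* Frame-size arithmetic above, worked through for the common case: with
 * new_mtu = 1500, max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * VLAN_HLEN (4) = 1522 octets, which is then raised to
 * HCLGE_MAC_DEFAULT_FRAME if smaller; hdev->mps caches the value the
 * hardware was actually programmed with.
 */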
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
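/* The queue-reset handshake above is: quiesce the TQP, assert the reset
 * request, poll the ready_to_reset flag up to HCLGE_TQP_RESET_TRY_TIMES
 * times with a short sleep between reads (so the wait budget is roughly
 * tries times the sleep interval), then de-assert the request. Failures
 * are only dev_warn'ed because the function returns void.
 */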
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
				   u32 *flowctrl_adv)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	*flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
			 (phydev->advertising & ADVERTISED_Asym_Pause);
}
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (rx_en)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	if (tx_en)
		phydev->advertising ^= ADVERTISED_Asym_Pause;
}
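/* The set/XOR pair above encodes the IEEE 802.3 pause advertisement bits
 * compactly; tracing the four cases: rx=0,tx=0 -> neither bit set;
 * rx=1,tx=0 -> Pause | Asym_Pause; rx=0,tx=1 -> Asym_Pause only;
 * rx=1,tx=1 -> Pause only (the XOR clears the Asym bit rx_en just set).
 */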
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	if (phydev->advertising & ADVERTISED_Pause)
		local_advertising = ADVERTISE_PAUSE_CAP;

	if (phydev->advertising & ADVERTISED_Asym_Pause)
		local_advertising |= ADVERTISE_PAUSE_ASYM;

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
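/* mii_resolve_flowctrl_fdx() is the generic helper from linux/mii.h that
 * applies the 802.3 pause resolution table. Two concrete cases, assuming
 * that helper's documented behaviour: advertising PAUSE_CAP | PAUSE_ASYM
 * against a partner with only PAUSE_CAP resolves to FLOW_CTRL_TX |
 * FLOW_CTRL_RX; advertising only PAUSE_ASYM against a partner with
 * PAUSE_CAP | PAUSE_ASYM resolves to FLOW_CTRL_TX alone.
 */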
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	return phy_start_aneg(phydev);
}
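/* In ethtool terms this is the set_pauseparam path: "ethtool -A <dev>
 * rx on tx off" arrives here as rx_en = 1, tx_en = 0. With autoneg on,
 * the new advertisement is pushed to the PHY and phy_start_aneg()
 * renegotiates; with autoneg off the MAC pause enables are forced
 * directly through hclge_cfg_pauseparam().
 */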
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
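/* Reading MDI-X state above needs a page switch on the PHY: select the
 * MDI-X page, read the control/status registers, then restore the copper
 * page so later accesses see the normal register set. The 0x0/0x1/0x3
 * control encodings mapped to MDI/MDI-X/auto follow the conventions of
 * the PHY this driver targets (an assumption from the surrounding
 * constants), and the status bit is only trusted once the speed/duplex
 * resolution flag reports the link as settled.
 */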
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}
		}
	}

	return 0;
err:
	return ret;
}
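/* Client model recap, as implemented above: one hnae3 client of each
 * flavour binds per vport. KNIC/UNIC clients take the NIC slot; a ROCE
 * client is only initialized once both a RoCE-capable device and a NIC
 * client are present, which is why each RoCE branch first runs
 * hclge_init_roce_base_info() and why the KNIC branch retries RoCE setup
 * for a client that registered earlier.
 */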
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->back = hdev;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_request = 0;
	hdev->reset_pending = 0;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		return ret;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);

	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *free_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 temp_tqps = 0;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		if (!hdev->htqp[i].alloced)
			temp_tqps++;
	}
	*free_tqps = temp_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
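/* Example of the re-sizing math above, for illustration only: if the new
 * per-TC rss_size is 24, roundup_pow_of_two(24) = 32 and ilog2(32) = 5,
 * so each active TC advertises a size code of 5 with queue offsets at
 * 0, 24, 48, ...; the indirection table is then refilled round-robin with
 * rss_indir[i] = i % 24, spreading its buckets evenly over the new queue
 * set.
 */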
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);