// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

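/* Read the whole MAC statistics block from firmware with one
 * multi-descriptor command and accumulate it into
 * hdev->hw_stats.mac_stats. The first descriptor carries the command
 * header, so it holds two fewer u64 stat words than the rest.
 */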
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

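/* Query the RX and TX packet counters of every queue pair from firmware
 * and accumulate them into the per-tqp software statistics.
 */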
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has one TX and one RX packet counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

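/* Refresh TQP, MAC and netdev statistics for the PF's main NIC handle
 * (vport 0) in one pass.
 */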
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes (GE/XGE/LGE/CGE) support it
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (u8 *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check if the pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

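/* Query the TQP count, packet buffer sizes and MSI-X resources assigned
 * to this PF. When RoCE is supported, the NIC vectors are placed in
 * front of the RoCE vectors inside the same MSI-X range.
 */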
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

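/* Translate the firmware speed encoding into an HCLGE_MAC_SPEED_* value;
 * hclge_cfg_mac_speed_dup_hw() below applies the reverse mapping.
 */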
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

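/* Unpack the two-descriptor configuration blob read from flash into
 * struct hclge_cfg. The 48-bit MAC address is split between param[2]
 * (low 32 bits) and a field of param[3] (high 16 bits).
 */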
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n",
			ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

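/* Bind one physical queue pair (tqp_pid) to a function and assign the
 * virtual queue id (tqp_vid) that the function will use to address it.
 */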
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

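/* Create one vport for the PF itself plus one per VMDq instance and per
 * requested VF, splitting the TQPs evenly; the main vport also takes
 * any remainder.
 */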
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

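/* Check whether the RX buffer left after private allocation can hold the
 * shared buffer, and if so compute the shared-buffer waterlines and the
 * per-TC thresholds.
 */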
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = pfc_enable_num * aligned_mps +
			(tc_num - pfc_enable_num) * aligned_mps / 2 +
			aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low =
			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < hdev->tx_buf_size)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = hdev->tx_buf_size;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high =
					roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
				priv->buf_size = priv->wl.high +
						 hdev->dv_buf_size;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high +
						 hdev->dv_buf_size;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 256;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

*hdev
,
1685 struct hclge_pkt_buf_alloc
*buf_alloc
)
1687 struct hclge_shared_buf
*s_buf
= &buf_alloc
->s_buf
;
1688 struct hclge_rx_com_thrd
*req
;
1689 struct hclge_desc desc
[2];
1690 struct hclge_tc_thrd
*tc
;
1694 for (i
= 0; i
< 2; i
++) {
1695 hclge_cmd_setup_basic_desc(&desc
[i
],
1696 HCLGE_OPC_RX_COM_THRD_ALLOC
, false);
1697 req
= (struct hclge_rx_com_thrd
*)&desc
[i
].data
;
1699 /* The first descriptor set the NEXT bit to 1 */
1701 desc
[i
].flag
|= cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1703 desc
[i
].flag
&= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT
);
1705 for (j
= 0; j
< HCLGE_TC_NUM_ONE_DESC
; j
++) {
1706 tc
= &s_buf
->tc_thrd
[i
* HCLGE_TC_NUM_ONE_DESC
+ j
];
1708 req
->com_thrd
[j
].high
=
1709 cpu_to_le16(tc
->high
>> HCLGE_BUF_UNIT_S
);
1710 req
->com_thrd
[j
].high
|=
1711 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B
));
1712 req
->com_thrd
[j
].low
=
1713 cpu_to_le16(tc
->low
>> HCLGE_BUF_UNIT_S
);
1714 req
->com_thrd
[j
].low
|=
1715 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B
));
1719 /* Send 2 descriptors at one time */
1720 ret
= hclge_cmd_send(&hdev
->hw
, desc
, 2);
1722 dev_err(&hdev
->pdev
->dev
,
1723 "common threshold config cmd failed %d\n", ret
);
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

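/* Top-level TX/RX packet buffer partitioning: calculate and commit the
 * TX buffers, the per-TC RX private buffers and the shared RX buffer,
 * then program waterlines and thresholds (the private waterline and
 * common threshold steps apply only to DCB-capable devices).
 */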
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

/* Only 10M and 100M links can run at half duplex; force full duplex
 * for everything faster.
 */
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);

	return ret;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	int speed;
	u8 duplex;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when the phy doesn't exist
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

2220 static void hclge_service_timer(struct timer_list
*t
)
2222 struct hclge_dev
*hdev
= from_timer(hdev
, t
, service_timer
);
2224 mod_timer(&hdev
->service_timer
, jiffies
+ HZ
);
2225 hdev
->hw_stats
.stats_timer
++;
2226 hclge_task_schedule(hdev
);
2229 static void hclge_service_complete(struct hclge_dev
*hdev
)
2231 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED
, &hdev
->state
));
2233 /* Flush memory before next watchdog */
2234 smp_mb__before_atomic();
2235 clear_bit(HCLGE_STATE_SERVICE_SCHED
, &hdev
->state
);
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: if reset and mailbox events are reported together then
	 * we will only process the reset event in this pass and defer the
	 * mailbox events. Since we have not cleared the RX CMDQ event this
	 * time, we will receive another interrupt from the hardware just for
	 * the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}
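/* hclge_check_event_cause() reports exactly one event per call, highest
 * priority first: IMP reset, then global reset, then core reset, then MSI-X
 * error, then mailbox.  Lower-priority causes stay latched in their source
 * registers, so the hardware raises the interrupt again once the current
 * event has been handled; this is what makes the "reset first, mailbox
 * later" assumption above safe.
 */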
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This
		 * could only be decided after we fetch the type of errors
		 * which caused this event. Therefore, we will do below for
		 * now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}
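/* Note on the tail of hclge_misc_irq_handle(): the misc vector is re-enabled
 * here only for mailbox events.  For reset events the source bit is left set
 * and the vector stays masked; hclge_clear_reset_cause() later clears the
 * status register and unmasks the vector once the reset has actually been
 * processed.
 */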
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WATI_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
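/* Worst-case wait implemented above: HCLGE_RESET_WAIT_CNT polls of
 * HCLGE_RESET_WATI_MS each, i.e. 200 * 100 ms = 20 s, for both the FLR
 * flr_state polling and the hardware reset-status polling.
 */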
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset)
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused the
		 * interrupt since it was not possible to do that in interrupt
		 * context (and this is the reason we introduced the new
		 * UNKNOWN reset type). Now that the errors have been handled
		 * and cleared in hardware, we can safely enable interrupts.
		 * This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
		rst_level = HNAE3_CORE_RESET;
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
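/* Example of the priority resolution above: if reset_pending has both
 * HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET set, hclge_get_reset_level()
 * returns HNAE3_GLOBAL_RESET and clears both bits, since the higher-level
 * reset already covers everything the function reset would do.
 */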
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		mod_timer(&hdev->reset_timer,
			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and the only way to
	 *    bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the
	 * watchdog hit us again. We will know this if the last reset request
	 * did not occur very recently (watchdog timer = 5*HZ, so check after
	 * a sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat of the
	 * most recent reset request then we want to make sure we throttle it,
	 * so we will not allow it again before 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(hdev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering global reset in reset timer\n");
	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
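/* The subtask therefore makes up to two passes per scheduling: first it
 * services any reset the hardware already has pending (hclge_reset()), then
 * it asserts any *new* reset request from the stack (hclge_do_reset()),
 * which in turn lands in reset_pending and reschedules this task.
 */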
static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_service_complete(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
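/* hclge_get_vport() works because the nic and roce handles are embedded
 * members of struct hclge_vport, so container_of() can recover the vport
 * from either handle.  A simplified view of the layout this depends on
 * (other fields elided):
 *
 *	struct hclge_vport {
 *		...
 *		struct hnae3_handle nic;
 *		struct hnae3_handle roce;
 *	};
 */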
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}
static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}
static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user-specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
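/* Worked example for the tc_size computation above, with rss_size = 24:
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size[i] = 5 is
 * written to hardware (a per-TC queue span of 2^5 = 32), while the actual
 * spread of traffic is still bounded by the rss_size-based indirection
 * table.
 */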
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}
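/* The default table above is a plain round-robin: with alloc_rss_size = 4,
 * the entries become 0, 1, 2, 3, 0, 1, 2, 3, ... across the whole
 * HCLGE_RSS_IND_TBL_SIZE table, spreading flows evenly over the vport's
 * RSS queues.
 */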
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * the firmware in revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}
static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.fd_en = true;
	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
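/* Resulting stage-1 key budget from the setup above: the 400-bit mode leaves
 * room for the full tuple set including the MAC tuples (and so enables
 * ETHER_FLOW), while the 200-bit mode drops them.  In both modes the
 * MAX_META_DATA_LENGTH bits at the MSB end of the key are reserved for the
 * meta data (roce_type, dst_vport) written by hclge_fd_convert_meta_data().
 */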
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		calc_y(tmp_y_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
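/* The calc_x()/calc_y() helpers used above build the two TCAM search words
 * from a (value, mask) pair: bits cleared in the mask become "don't care" in
 * the x/y encoding, while bits set in the mask encode the required 0 or 1.
 * That is why every case pairs rule->tuples.<field> with the matching
 * rule->tuples_mask.<field>: the mask decides which bits of the tuple
 * actually participate in the match.
 */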
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0 ; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}
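/* For illustration, a spec that passes these checks can be installed from
 * userspace with something like (interface name is a placeholder):
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3
 *
 * Any tuple left at zero in the spec is reported back through 'unused' so
 * the key stage can skip it.
 */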
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	return rule && rule->location == location;
}
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add)
			return 0;

	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d is inexistent\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	hdev->hclge_fd_rule_num++;

	return 0;
}
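/* The rule list is kept sorted by rule->location: the walk above stops at
 * the first rule whose location is >= the target, so duplicate detection
 * and the hlist_add_behind() insertion point both come from a single O(n)
 * scan.
 */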
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[3] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[3] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[3] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[3] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[3] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[3] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[3] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[3] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, 4);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, 4);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, 4);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, 4);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, 4);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, 4);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, 4);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
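/* IPv4 addresses are stored in src_ip[3]/dst_ip[3] so that IPv4 and IPv6
 * rules share the same four-word tuple storage; words 0..2 stay zero for
 * IPv4, and the 0xFFFF mask on ether_proto pins each rule to exactly one
 * ethertype.
 */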
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_cfg.fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret)
		goto free_rule;

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto free_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto free_rule;

	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
	if (ret)
		goto free_rule;

	return ret;

free_rule:
	kfree(rule);
	return ret;
}
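/* The upper bits of fs->ring_cookie select the VF (see
 * ethtool_get_flow_spec_ring_vf()), the lower bits the queue; a cookie of
 * RX_CLS_FLOW_DISC ("action -1" in ethtool terms) maps to the drop action
 * instead of a queue.
 */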
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d is inexistent\n",
			fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				   fs->location, NULL, false);
	if (ret)
		return ret;

	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
					 false);
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
					     rule->location, NULL, false);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	} else {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node)
			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
					     rule->location, NULL, false);
	}
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}
	return 0;
}
static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location)
		return -ENOENT;

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.tcp_ip4_spec.ip4src =
				rule->unused_tuple & BIT(INNER_SRC_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[3]);
		fs->m_u.tcp_ip4_spec.ip4dst =
				rule->unused_tuple & BIT(INNER_DST_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
				rule->unused_tuple & BIT(INNER_SRC_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[3]);
		fs->m_u.usr_ip4_spec.ip4dst =
				rule->unused_tuple & BIT(INNER_DST_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, 4);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, 4);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, 4);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, 4);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, 4);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, 4);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, 4);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, 4);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->reset_count;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hdev->fd_cfg.fd_en = enable;
	if (!enable)
		hclge_del_all_fd_entries(handle, false);
	else
		hclge_restore_fd_entries(handle);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100
	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);
	return 0;
}
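/* The polling loop above waits up to HCLGE_SERDES_RETRY_NUM *
 * HCLGE_SERDES_RETRY_MS = 100 * 10ms = 1s for the firmware to raise the
 * DONE bit before declaring a timeout.
 */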
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	for (i = 0; i < vport->alloc_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only need to stop phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;
	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
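/* Worked example: vfid 200 falls in the high range, so word_num =
 * (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, i.e. bit 8 of
 * desc[2].data[0]; vfid 50 would land in desc[1].data[1] bit 18.
 */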
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
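/* Worked example: for 00:11:22:33:44:55, high_val = 0x33221100 (bytes 3,
 * 2, 1, 0 of the MAC from MSB to LSB) and low_val = 0x5544, matching the
 * little-endian layout of the hi32/lo16 table fields.
 */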
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}
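/* Worked example (illustrative numbers): with allocated_size = 256 and
 * num_req_vfs = 6, the divisor num_req_vfs + 2 splits the space eight
 * ways, so priv_umv_size = 256 / 8 = 32 private entries per function and
 * share_umv_size starts at 32 + 256 % 8 = 32 entries drawn on by whichever
 * function exhausts its private quota first.
 */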
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;

		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;
		vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret)
		ret = -EINVAL;

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr do not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* Maybe this mac address is in mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. the delete action to
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%p.\n",
			 new_addr);
		return -EINVAL;
	}

	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
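/* Worked example: vlan_id 300 gives vlan_offset_160 = 1, vlan_offset_byte =
 * (300 % 160) / 8 = 17 and vlan_offset_byte_val = 1 << (300 % 8) = 0x10,
 * so the command flips bit 4 of byte 17 in the second 160-VLAN window.
 */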
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
					0, is_kill);
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
*hdev
)
6408 struct hclge_rx_vlan_type_cfg_cmd
*rx_req
;
6409 struct hclge_tx_vlan_type_cfg_cmd
*tx_req
;
6410 struct hclge_desc desc
;
6413 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_TYPE_ID
, false);
6414 rx_req
= (struct hclge_rx_vlan_type_cfg_cmd
*)desc
.data
;
6415 rx_req
->ot_fst_vlan_type
=
6416 cpu_to_le16(hdev
->vlan_type_cfg
.rx_ot_fst_vlan_type
);
6417 rx_req
->ot_sec_vlan_type
=
6418 cpu_to_le16(hdev
->vlan_type_cfg
.rx_ot_sec_vlan_type
);
6419 rx_req
->in_fst_vlan_type
=
6420 cpu_to_le16(hdev
->vlan_type_cfg
.rx_in_fst_vlan_type
);
6421 rx_req
->in_sec_vlan_type
=
6422 cpu_to_le16(hdev
->vlan_type_cfg
.rx_in_sec_vlan_type
);
6424 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
6426 dev_err(&hdev
->pdev
->dev
,
6427 "Send rxvlan protocol type command fail, ret =%d\n",
6432 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_MAC_VLAN_INSERT
, false);
6434 tx_req
= (struct hclge_tx_vlan_type_cfg_cmd
*)desc
.data
;
6435 tx_req
->ot_vlan_type
= cpu_to_le16(hdev
->vlan_type_cfg
.tx_ot_vlan_type
);
6436 tx_req
->in_vlan_type
= cpu_to_le16(hdev
->vlan_type_cfg
.tx_in_vlan_type
);
6438 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
6440 dev_err(&hdev
->pdev
->dev
,
6441 "Send txvlan protocol type command fail, ret =%d\n",
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE		0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS, true);
		if (ret)
			return ret;

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.accept_untag1 = true;

		/* accept_tag2 and accept_untag2 are not supported on
		 * pdev revision(0x20); newer revisions support them. The
		 * values of these two fields will not return an error when
		 * the driver sends the command to firmware on revision(0x20).
		 * These two fields can not be configured by the user.
		 */
		vport->txvlan_cfg.accept_tag2 = true;
		vport->txvlan_cfg.accept_untag2 = true;

		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (rx_en)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	if (tx_en)
		phydev->advertising ^= ADVERTISED_Asym_Pause;
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	if (phydev->advertising & ADVERTISED_Pause)
		local_advertising = ADVERTISE_PAUSE_CAP;

	if (phydev->advertising & ADVERTISED_Asym_Pause)
		local_advertising |= ADVERTISE_PAUSE_ASYM;

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_instance_hw(struct hclge_dev *hdev)
{
	return hclge_mac_connect_phy(hdev);
}

static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
{
	hclge_mac_disconnect_phy(hdev);
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			ret = hclge_init_instance_hw(hdev);
			if (ret) {
				client->ops->uninit_instance(&vport->nic,
							     0);
				goto clear_nic;
			}

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			hclge_uninit_instance_hw(hdev);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_start(vport);
		vport++;
	}
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	/* Free old tqps, and reallocate with new tqp number when nic setup */
	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
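/* Register-dump layout used by hclge_get_regs_len()/hclge_get_regs():
 * values are emitted in lines of REG_NUM_PER_LINE u32s, and each
 * block is padded up to a line boundary with SEPARATOR_VALUE markers.
 * For example, assuming the 14-entry cmdq_reg_addr_list defined
 * earlier in this file, separator_num = 4 - (14 % 4) = 2 padding
 * words, so that block occupies exactly four 16-byte lines.
 */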
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);